article (63)

Kamiya, T.; Ariza, A. A.; Sotta, N.; Fujiwara, T.; and Guo, W. A multi-target regression method to predict element concentrations in tomato leaves using hyperspectral imaging. Plant Phenomics, plantphenomics.0146, January 2024. doi: 10.34133/plantphenomics.0146.

Wang, H.; Yin, H.; Li, H.; Wu, G.; Guo, W.; Qi, K.; Tao, S.; Zhang, S.; Ninomiya, S.; and Mu, Y. Quantitative 2D fruit shape analysis of a wide range of pear genetic resources toward shape design breeding. Scientia Horticulturae, 327: 112826, March 2024. doi: 10.1016/j.scienta.2023.112826.

Zhang, W.; Zheng, C.; Wang, C.; and Guo, W. DomAda-FruitDet: Domain-Adaptive Anchor-Free Fruit Detection Model for Auto Labeling. Plant Phenomics, 6: 0135, January 2024. doi: 10.34133/plantphenomics.0135.
Abstract: Recently, deep learning-based fruit detection applications have been widely used in the modern fruit industry; however, the training data labeling process remains a time-consuming and labor-intensive process. Auto labeling can provide a convenient and efficient data source for constructing smart orchards based on deep-learning technology. In our previous study, based on a labeled source domain fruit dataset, we used a generative adversarial network and a fruit detection model to achieve auto labeling of unlabeled target domain fruit images. However, since the current method uses one species source domain fruit to label multiple species target domain fruits, there is a problem of the domain gap in both the foreground and the background between the training data (retaining the source domain fruit label information) and the application data (target domain fruit images) of the fruit detection model. Therefore, we propose a domain-adaptive anchor-free fruit detection model, DomAda-FruitDet, and apply it to the previously proposed fruit labeling method to further improve the accuracy. It consists of 2 design aspects: (a) With a foreground domain-adaptive structure based on double prediction layers, an anchor-free method with multiscale detection capability is constructed to generate adaptive bounding boxes that overcome the foreground domain gap; (b) with a background domain-adaptive strategy based on sample allocation, we enhance the ability of the model to extract foreground object features to overcome the background domain gap. As a result, the proposed method can label actual apple, tomato, pitaya, and mango datasets, with an average precision of 90.9%, 90.8%, 88.3%, and 94.0%, respectively. In conclusion, the proposed DomAda-FruitDet effectively addressed the problem of the domain gap and improved effective auto labeling for fruit detection tasks.

Li, T.; Asai, M.; Kato, Y.; Fukano, Y.; and Guo, W. Channel Attention GAN-Based Synthetic Weed Generation for Precise Weed Identification. Plant Phenomics, 6: 0122, January 2024. doi: 10.34133/plantphenomics.0122.
Abstract: Weed is a major biological factor causing declines in crop yield. However, widespread herbicide application and indiscriminate weeding with soil disturbance are of great concern because of their environmental impacts. Site-specific weed management (SSWM) refers to a weed management strategy for digital agriculture that results in low energy loss. Deep learning is crucial for developing SSWM, as it distinguishes crops from weeds and identifies weed species. However, this technique requires substantial annotated data, which necessitates expertise in weed science and agronomy. In this study, we present a channel attention mechanism-driven generative adversarial network (CA-GAN) that can generate realistic synthetic weed data. The performance of the model was evaluated using two datasets: the public segmented Plant Seedling Dataset (sPSD), featuring nine common broadleaf weeds from arable land, and the Institute for Sustainable Agro-ecosystem Services (ISAS) dataset, which includes five common summer weeds in Japan. Consequently, the synthetic dataset generated by the proposed CA-GAN obtained an 82.63% recognition accuracy on the sPSD and 93.46% on the ISAS dataset. The Fréchet inception distance (FID) score test measures the similarity between a synthetic and real dataset, and it has been shown to correlate well with human judgments of the quality of synthetic samples. The synthetic dataset achieved a low FID score (20.95 on the sPSD and 24.31 on the ISAS dataset). Overall, the experimental results demonstrated that the proposed method outperformed previous state-of-the-art GAN models in terms of image quality, diversity, and discriminability, making it a promising approach for synthetic agricultural data generation.

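For readers unfamiliar with the FID score cited above, the following is a minimal illustrative sketch of how it is commonly computed, assuming feature vectors have already been extracted with an Inception network into the arrays real_feats and fake_feats (the function and variable names are illustrative, not code from the paper):

    import numpy as np
    from scipy import linalg

    def frechet_inception_distance(real_feats, fake_feats):
        # real_feats, fake_feats: (N, D) arrays of Inception features (assumed inputs).
        mu_r, mu_f = real_feats.mean(axis=0), fake_feats.mean(axis=0)
        cov_r = np.cov(real_feats, rowvar=False)
        cov_f = np.cov(fake_feats, rowvar=False)
        covmean, _ = linalg.sqrtm(cov_r @ cov_f, disp=False)  # matrix square root
        if np.iscomplexobj(covmean):
            covmean = covmean.real  # drop tiny imaginary parts from numerical error
        diff = mu_r - mu_f
        return float(diff @ diff + np.trace(cov_r + cov_f - 2.0 * covmean))

Lower values mean the synthetic feature distribution is closer to the real one, which is why the low scores reported above indicate realistic synthetic weeds.
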
Zhang, W.; Liu, Y.; Wang, C.; Zheng, C.; Cui, G.; and Guo, W. EasyDAM_V4: Guided-GAN-based cross-species data labeling for fruit detection with significant shape difference. Horticulture Research, 11(3): uhae007, March 2024. doi: 10.1093/hr/uhae007.
Abstract: Traditional agriculture is gradually being combined with artificial intelligence technology. High-performance fruit detection technology is an important basic technology in the practical application of modern smart orchards and has great application value. At this stage, fruit detection models need to rely on a large number of labeled datasets to support the training and learning of detection models, resulting in higher manual labeling costs. Our previous work uses a generative adversarial network to translate the source domain to the target fruit images. Thus, automatic labeling is performed on the actual dataset in the target domain. However, the method still does not achieve satisfactory results for translating fruits with significant shape variance. Therefore, this study proposes an improved fruit automatic labeling method, EasyDAM_V4, which introduces the Across-CycleGAN fruit translation model to achieve spanning translation between phenotypic features such as fruit shape, texture, and color to reduce domain differences effectively. We validated the proposed method using pear fruit as the source domain and three fruits with large phenotypic differences, namely pitaya, eggplant, and cucumber, as the target domain. The results show that the EasyDAM_V4 method achieves substantial cross-fruit shape translation, and the average accuracy of labeling reached 87.8, 87.0, and 80.7% for the three types of target domain datasets, respectively. Therefore, this research method can improve the applicability of the automatic labeling process even if significant shape variance exists between the source and target domain.

Drofova, I.; Guo, W.; Wang, H.; and Adamek, M. Use of scanning devices for object 3D reconstruction by photogrammetry and visualization in virtual reality. Bulletin of Electrical Engineering and Informatics, 12(2): 868–881, April 2023. doi: 10.11591/eei.v12i2.4584.
Abstract: This article aims to compare two different scanning devices (360 camera and digital single lens reflex (DSLR) camera) and their properties in the three-dimensional (3D) reconstruction of the object by the photogrammetry method. The article first describes the various stages of the process of 3D modeling and reconstruction of the object. A point cloud generated to the 3D model of the object, including textures, is created in the following steps. The scanning devices are compared under the same conditions and time from capturing the image of a real object to its 3D reconstruction. The attributes of the scanned image of the reconstructed 3D model, which is a mandarin tree in a citrus greenhouse in a daylight environment, are also compared. Both created models are also compared visually. That visual comparison reveals the possibilities for applying both scanning devices in the process of 3D reconstruction of the object by photogrammetry. The results of this research can be applied in the field of 3D modeling of a real object using 3D models in virtual reality, 3D printing, 3D visualization, image analysis, and 3D online presentation.

Zhao, J.; Kaga, A.; Yamada, T.; Komatsu, K.; Hirata, K.; Kikuchi, A.; Hirafuji, M.; Ninomiya, S.; and Guo, W. Improved Field-Based Soybean Seed Counting and Localization with Feature Level Considered. Plant Phenomics, 5: 0026, March 2023. doi: 10.34133/plantphenomics.0026.
Abstract: Developing automated soybean seed counting tools will help automate yield prediction before harvesting and improve selection efficiency in breeding programs. An integrated approach for counting and localization is ideal for subsequent analysis. The traditional method of object counting is labor-intensive and error-prone and has low localization accuracy. To quantify soybean seed directly rather than sequentially, we propose a P2PNet-Soy method. Several strategies were considered to adjust the architecture and subsequent postprocessing to maximize model performance in seed counting and localization. First, unsupervised clustering was applied to merge closely located overcounts. Second, low-level features were included with high-level features to provide more information. Third, atrous convolution with different kernel sizes was applied to low- and high-level features to extract scale-invariant features to factor in soybean size variation. Fourth, channel and spatial attention effectively separated the foreground and background for easier soybean seed counting and localization. At last, the input image was added to these extracted features to improve model performance. Using 24 soybean accessions as experimental materials, we trained the model on field images of individual soybean plants obtained from one side and tested them on images obtained from the opposite side, with all the above strategies. The superiority of the proposed P2PNet-Soy in soybean seed counting and localization over the original P2PNet was confirmed by a reduction in the value of the mean absolute error, from 105.55 to 12.94. Furthermore, the trained model worked effectively on images obtained directly from the field without background interference.

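The abstract above mentions merging closely located overcounts by unsupervised clustering. As an illustrative sketch only (the choice of DBSCAN and the 10-pixel merge radius are assumptions, not values from the paper), nearby point predictions can be collapsed to cluster centroids like this:

    import numpy as np
    from sklearn.cluster import DBSCAN

    def merge_close_points(points, radius_px=10.0):
        # points: (N, 2) array of predicted seed (x, y) locations; radius_px is an assumed threshold.
        points = np.asarray(points, dtype=float)
        if len(points) == 0:
            return points
        labels = DBSCAN(eps=radius_px, min_samples=1).fit_predict(points)
        # Collapse each cluster of nearby detections to its centroid.
        return np.vstack([points[labels == k].mean(axis=0) for k in np.unique(labels)])
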
Li, Y.; Zhan, X.; Liu, S.; Lu, H.; Jiang, R.; Guo, W.; Chapman, S.; Ge, Y.; Solan, B.; Ding, Y.; and Baret, F. Self-Supervised Plant Phenotyping by Combining Domain Adaptation with 3D Plant Model Simulations: Application to Wheat Leaf Counting at Seedling Stage. Plant Phenomics, 5: 0041, April 2023. doi: 10.34133/plantphenomics.0041.
Abstract: The number of leaves at a given time is important to characterize plant growth and development. In this work, we developed a high-throughput method to count the number of leaves by detecting leaf tips in RGB images. The digital plant phenotyping platform was used to simulate a large and diverse dataset of RGB images and corresponding leaf tip labels of wheat plants at seedling stages (150,000 images with over 2 million labels). The realism of the images was then improved using domain adaptation methods before training deep learning models. The results demonstrate the efficiency of the proposed method evaluated on a diverse test dataset, collecting measurements from 5 countries obtained under different environments, growth stages, and lighting conditions with different cameras (450 images with over 2,162 labels). Among the 6 combinations of deep learning models and domain adaptation techniques, the Faster-RCNN model with cycle-consistent generative adversarial network adaptation technique provided the best performance (R² = 0.94, root mean square error = 8.7). Complementary studies show that it is essential to simulate images with sufficient realism (background, leaf texture, and lighting conditions) before applying domain adaptation techniques. Furthermore, the spatial resolution should be better than 0.6 mm per pixel to identify leaf tips. The method is claimed to be self-supervised since no manual labeling is required for model training. The self-supervised phenotyping approach developed here offers great potential for addressing a wide range of plant phenotyping problems. The trained networks are available at https://github.com/YinglunLi/Wheat-leaf-tip-detection.

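For reference, the agreement metrics quoted above (R² and RMSE between predicted and observed counts) follow their usual definitions; a minimal sketch, with generic names not taken from the paper:

    import numpy as np

    def r2_rmse(y_true, y_pred):
        # y_true, y_pred: observed and predicted counts (e.g., leaf tips per image).
        y_true = np.asarray(y_true, dtype=float)
        y_pred = np.asarray(y_pred, dtype=float)
        ss_res = np.sum((y_true - y_pred) ** 2)
        ss_tot = np.sum((y_true - y_true.mean()) ** 2)
        return 1.0 - ss_res / ss_tot, float(np.sqrt(np.mean((y_true - y_pred) ** 2)))
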
Nakashima, T.; Tomobe, H.; Morigaki, T.; Yang, M.; Yamaguchi, H.; Kato, Y.; Guo, W.; Sharma, V.; Kimura, H.; and Morikawa, H. Non-destructive high-throughput measurement of elastic-viscous properties of maize using a novel ultra-micro sensor array and numerical validation. Scientific Reports, 13(1): 4914, March 2023. doi: 10.1038/s41598-023-32130-5.
Abstract: Maize is the world's most produced cereal crop, and the selection of maize cultivars with a high stem elastic modulus is an effective method to prevent cereal crop lodging. We developed an ultra-compact sensor array inspired by earthquake engineering and proposed a method for the high-throughput evaluation of the elastic modulus of maize cultivars. A natural vibration analysis based on the obtained Young’s modulus using finite element analysis (FEA) was performed and compared with the experimental results, which showed that the estimated Young’s modulus is representative of the individual Young’s modulus. FEA also showed the hotspot where the stalk was most deformed when the corn was vibrated by wind. The six tested cultivars were divided into two phenotypic groups based on the position and number of hotspots. In this study, we proposed a non-destructive high-throughput phenotyping technique for estimating the modulus of elasticity of maize stalks and successfully visualized which parts of the stalks should be improved for specific cultivars to prevent lodging.

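Purely as an illustration of why a measured natural vibration constrains Young's modulus (a textbook first-mode relation for a uniform cantilever beam, not the paper's sensor-array procedure or its finite element model):

    import numpy as np

    def cantilever_first_frequency_hz(E, I, rho, A, L):
        # E: Young's modulus [Pa], I: second moment of area [m^4],
        # rho: density [kg/m^3], A: cross-section area [m^2], L: length [m].
        beta1L = 1.8751  # first-mode eigenvalue for a clamped-free (cantilever) beam
        return (beta1L ** 2 / (2.0 * np.pi)) * np.sqrt(E * I / (rho * A * L ** 4))

Solving this relation for E from a measured first natural frequency is one standard way vibration data can be turned into a stiffness estimate; the paper's actual stalk geometry and analysis are more detailed.
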
Zhou, Q.; Guo, W.; Chen, N.; Wang, Z.; Li, G.; Ding, Y.; Ninomiya, S.; and Mu, Y. Analyzing nitrogen effects on rice panicle development by panicle detection and time-series tracking. Plant Phenomics, plantphenomics.0048, April 2023. doi: 10.34133/plantphenomics.0048.

Li, H.; Wu, G.; Tao, S.; Yin, H.; Qi, K.; Zhang, S.; Guo, W.; Ninomiya, S.; and Mu, Y. Automatic Branch–Leaf Segmentation and Leaf Phenotypic Parameter Estimation of Pear Trees Based on Three-Dimensional Point Clouds. Sensors, 23(9): 4572, May 2023. doi: 10.3390/s23094572.
Abstract: The leaf phenotypic traits of plants have a significant impact on the efficiency of canopy photosynthesis. However, traditional methods such as destructive sampling will hinder the continuous monitoring of plant growth, while manual measurements in the field are both time-consuming and laborious. Nondestructive and accurate measurements of leaf phenotypic parameters can be achieved through the use of 3D canopy models and object segmentation techniques. This paper proposed an automatic branch–leaf segmentation pipeline based on lidar point cloud and conducted the automatic measurement of leaf inclination angle, length, width, and area, using pear canopy as an example. Firstly, a three-dimensional model using a lidar point cloud was established using SCENE software. Next, 305 pear tree branches were manually divided into branch points and leaf points, and 45 branch samples were selected as test data. Leaf points were further marked as 572 leaf instances on these test data. The PointNet++ model was used, with 260 point clouds as training input to carry out semantic segmentation of branches and leaves. Using the leaf point clouds in the test dataset as input, a single leaf instance was extracted by means of a mean shift clustering algorithm. Finally, based on the single leaf point cloud, the leaf inclination angle was calculated by plane fitting, while the leaf length, width, and area were calculated by midrib fitting and triangulation. The semantic segmentation model was tested on 45 branches, with a mean Precision_sem, mean Recall_sem, mean F1-score, and mean Intersection over Union (IoU) of branches and leaves of 0.93, 0.94, 0.93, and 0.88, respectively. For single leaf extraction, the Precision_ins, Recall_ins, and mean coverage (mCoV) were 0.89, 0.92, and 0.87, respectively. Using the proposed method, the estimated leaf inclination, length, width, and area of pear leaves showed a high correlation with manual measurements, with correlation coefficients of 0.94 (root mean squared error: 4.44°), 0.94 (root mean squared error: 0.43 cm), 0.91 (root mean squared error: 0.39 cm), and 0.93 (root mean squared error: 5.21 cm²), respectively. These results demonstrate that the method can automatically and accurately measure the phenotypic parameters of pear leaves. This has great significance for monitoring pear tree growth, simulating canopy photosynthesis, and optimizing orchard management.

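The abstract above computes leaf inclination by plane fitting. As a generic sketch (not the paper's exact procedure, and assuming z is the vertical axis of the point cloud), a least-squares plane can be fitted to a single leaf's points with an SVD and its tilt from horizontal read off the normal vector:

    import numpy as np

    def leaf_inclination_deg(leaf_points):
        # leaf_points: (N, 3) array of one leaf's point cloud; z vertical (assumption).
        pts = np.asarray(leaf_points, dtype=float)
        centered = pts - pts.mean(axis=0)
        _, _, vt = np.linalg.svd(centered, full_matrices=False)
        normal = vt[-1]  # right singular vector with the smallest singular value = plane normal
        cos_tilt = abs(normal[2]) / np.linalg.norm(normal)
        # Tilt of the fitted leaf plane from horizontal = angle between its normal and the vertical axis.
        return float(np.degrees(np.arccos(np.clip(cos_tilt, 0.0, 1.0))))
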
Madec, S.; Irfan, K.; Velumani, K.; Baret, F.; David, E.; Daubige, G.; Samatan, L. B.; Serouart, M.; Smith, D.; James, C.; Camacho, F.; Guo, W.; De Solan, B.; Chapman, S. C.; and Weiss, M. VegAnn, Vegetation Annotation of multi-crop RGB images acquired under diverse conditions for segmentation. Scientific Data, 10(1): 302, May 2023. doi: 10.1038/s41597-023-02098-y.
Abstract: Applying deep learning to images of cropping systems provides new knowledge and insights in research and commercial applications. Semantic segmentation or pixel-wise classification, of RGB images acquired at the ground level, into vegetation and background is a critical step in the estimation of several canopy traits. Current state of the art methodologies based on convolutional neural networks (CNNs) are trained on datasets acquired under controlled or indoor environments. These models are unable to generalize to real-world images and hence need to be fine-tuned using new labelled datasets. This motivated the creation of the VegAnn - Vegetation Annotation - dataset, a collection of 3775 multi-crop RGB images acquired for different phenological stages using different systems and platforms in diverse illumination conditions. We anticipate that VegAnn will help improve segmentation algorithm performances, facilitate benchmarking and promote large-scale crop vegetation segmentation research.

Gao, Y.; Li, Y.; Jiang, R.; Zhan, X.; Lu, H.; Guo, W.; Yang, W.; Ding, Y.; and Liu, S. Enhancing Green Fraction Estimation in Rice and Wheat Crops: A Self-Supervised Deep Learning Semantic Segmentation Approach. Plant Phenomics, 5: 0064, July 2023. doi: 10.34133/plantphenomics.0064.
Abstract: The green fraction (GF), which is the fraction of green vegetation in a given viewing direction, is closely related to the light interception ability of the crop canopy. Monitoring the dynamics of GF is therefore of great interest for breeders to identify genotypes with high radiation use efficiency. The accuracy of GF estimation depends heavily on the quality of the segmentation dataset and the accuracy of the image segmentation method. To enhance segmentation accuracy while reducing annotation costs, we developed a self-supervised strategy for deep learning semantic segmentation of rice and wheat field images with very contrasting field backgrounds. First, the Digital Plant Phenotyping Platform was used to generate large, perfectly labeled simulated field images for wheat and rice crops, considering diverse canopy structures and a wide range of environmental conditions (sim dataset). We then used the domain adaptation model cycle-consistent generative adversarial network (CycleGAN) to bridge the reality gap between the simulated and real images (real dataset), producing simulation-to-reality images (sim2real dataset). Finally, 3 different semantic segmentation models (U-Net, DeepLabV3+, and SegFormer) were trained using 3 datasets (real, sim, and sim2real datasets). The performance of the 9 training strategies was assessed using real images captured from various sites. The results showed that SegFormer trained using the sim2real dataset achieved the best segmentation performance for both rice and wheat crops (rice: Accuracy = 0.940, F1-score = 0.937; wheat: Accuracy = 0.952, F1-score = 0.935). Likewise, favorable GF estimation results were obtained using the above strategy (rice: R² = 0.967, RMSE = 0.048; wheat: R² = 0.984, RMSE = 0.028). Compared with SegFormer trained using a real dataset, the optimal strategy demonstrated greater superiority for wheat images than for rice images. This discrepancy can be partially attributed to the differences in the backgrounds of the rice and wheat fields. The uncertainty analysis indicated that our strategy could be disrupted by the inhomogeneity of pixel brightness and the presence of senescent elements in the images. In summary, our self-supervised strategy addresses the issues of high cost and uncertain annotation accuracy during dataset creation, ultimately enhancing GF estimation accuracy for rice and wheat field images. The best weights we trained in wheat and rice are available: https://github.com/PheniX-Lab/sim2real-seg.

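Once an image has been segmented into vegetation and background, the green fraction defined in the abstract above reduces to the share of vegetation pixels in the image. A minimal sketch of that final step (generic definition, not code from the paper):

    import numpy as np

    def green_fraction(mask):
        # mask: 2D array where nonzero/True marks vegetation pixels from the segmentation model.
        mask = np.asarray(mask)
        return float(np.count_nonzero(mask) / mask.size)
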
Zhang, W.; Liu, Y.; Zheng, C.; Cui, G.; and Guo, W. EasyDAM_V3: Automatic Fruit Labeling Based on Optimal Source Domain Selection and Data Synthesis via a Knowledge Graph. Plant Phenomics, 5: 0067, July 2023. doi: 10.34133/plantphenomics.0067.
Abstract: Although deep learning-based fruit detection techniques are becoming popular, they require a large number of labeled datasets to support model training. Moreover, the manual labeling process is time-consuming and labor-intensive. We previously implemented a generative adversarial network-based method to reduce labeling costs. However, it does not consider fitness among more species. Methods of selecting the most suitable source domain dataset based on the fruit datasets of the target domain remain to be investigated. Moreover, current automatic labeling technology still requires manual labeling of the source domain dataset and cannot completely eliminate manual processes. Therefore, an improved EasyDAM_V3 model was proposed in this study as an automatic labeling method for additional classes of fruit. This study proposes both an optimal source domain establishment method based on a multidimensional spatial feature model to select the most suitable source domain, and a high-volume dataset construction method based on transparent background fruit image translation by constructing a knowledge graph of orchard scene hierarchy component synthesis rules. The EasyDAM_V3 model can automatically obtain fruit label information from the dataset, thereby eliminating manual labeling. To test the proposed method, pear was used as the selected optimal source domain, followed by orange, apple, and tomato as the target domain datasets. The results showed that the average precision of annotation reached 90.94%, 89.78%, and 90.84% for the target datasets, respectively. The EasyDAM_V3 model can obtain the optimal source domain in automatic labeling tasks, thus eliminating the manual labeling process and reducing associated costs and labor.

David, E.; Ogidi, F.; Smith, D.; Chapman, S.; De Solan, B.; Guo, W.; Baret, F.; and Stavness, I. Global Wheat Head Detection Challenges: Winning Models and Application for Head Counting. Plant Phenomics, 5: 0059, June 2023. doi: 10.34133/plantphenomics.0059.
Abstract: Data competitions have become a popular approach to crowdsource new data analysis methods for general and specialized data science problems. Data competitions have a rich history in plant phenotyping, and new outdoor field datasets have the potential to embrace solutions across research and commercial applications. We developed the Global Wheat Challenge as a generalization competition in 2020 and 2021 to find more robust solutions for wheat head detection using field images from different regions. We analyze the winning challenge solutions in terms of their robustness when applied to new datasets. We found that the design of the competition had an influence on the selection of winning solutions and provide recommendations for future competitions to encourage the selection of more robust solutions.

Analyzing Nitrogen Effects on Rice Panicle Development by Panicle Detection and Time-Series Tracking. Zhou, Q.; Guo, W.; Chen, N.; Wang, Z.; Li, G.; Ding, Y.; Ninomiya, S.; and Mu, Y. Plant Phenomics, 5: 0048. June 2023.
\n
@article{zhou_analyzing_2023,\n\ttitle = {Analyzing {Nitrogen} {Effects} on {Rice} {Panicle} {Development} by {Panicle} {Detection} and {Time}-{Series} {Tracking}},\n\tvolume = {5},\n\tissn = {2643-6515},\n\turl = {https://spj.science.org/doi/10.34133/plantphenomics.0048},\n\tdoi = {10.34133/plantphenomics.0048},\n\tabstract = {Detailed observation of the phenotypic changes in rice panicle substantially helps us to understand the yield formation. In recent studies, phenotyping of rice panicles during the heading–flowering stage still lacks comprehensive analysis, especially of panicle development under different nitrogen treatments. In this work, we proposed a pipeline to automatically acquire the detailed panicle traits based on time-series images by using the YOLO v5, ResNet50, and DeepSORT models. Combined with field observation data, the proposed method was used to test whether it has an ability to identify subtle differences in panicle developments under different nitrogen treatments. The result shows that panicle counting throughout the heading–flowering stage achieved high accuracy (\n              R\n              2\n              = 0.96 and RMSE = 1.73), and heading date was estimated with an absolute error of 0.25 days. In addition, by identical panicle tracking based on the time-series images, we analyzed detailed flowering phenotypic changes of a single panicle, such as flowering duration and individual panicle flowering time. For rice population, with an increase in the nitrogen application: panicle number increased, heading date changed little, but the duration was slightly extended; cumulative flowering panicle number increased, rice flowering initiation date arrived earlier while the ending date was later; thus, the flowering duration became longer. For a single panicle, identical panicle tracking revealed that higher nitrogen application led to earlier flowering initiation date, significantly longer flowering days, and significantly longer total duration from vigorous flowering beginning to the end (total DBE). However, the vigorous flowering beginning time showed no significant differences and there was a slight decrease in daily DBE.},\n\tlanguage = {en},\n\turldate = {2023-07-31},\n\tjournal = {Plant Phenomics},\n\tauthor = {Zhou, Qinyang and Guo, Wei and Chen, Na and Wang, Ze and Li, Ganghua and Ding, Yanfeng and Ninomiya, Seishi and Mu, Yue},\n\tmonth = jun,\n\tyear = {2023},\n\tpages = {0048},\n}\n\n
\n
\n\n\n
Detailed observation of the phenotypic changes in rice panicle substantially helps us to understand the yield formation. In recent studies, phenotyping of rice panicles during the heading–flowering stage still lacks comprehensive analysis, especially of panicle development under different nitrogen treatments. In this work, we proposed a pipeline to automatically acquire the detailed panicle traits based on time-series images by using the YOLO v5, ResNet50, and DeepSORT models. Combined with field observation data, the proposed method was used to test whether it has an ability to identify subtle differences in panicle developments under different nitrogen treatments. The result shows that panicle counting throughout the heading–flowering stage achieved high accuracy (R² = 0.96 and RMSE = 1.73), and heading date was estimated with an absolute error of 0.25 days. In addition, by identical panicle tracking based on the time-series images, we analyzed detailed flowering phenotypic changes of a single panicle, such as flowering duration and individual panicle flowering time. For rice population, with an increase in the nitrogen application: panicle number increased, heading date changed little, but the duration was slightly extended; cumulative flowering panicle number increased, rice flowering initiation date arrived earlier while the ending date was later; thus, the flowering duration became longer. For a single panicle, identical panicle tracking revealed that higher nitrogen application led to earlier flowering initiation date, significantly longer flowering days, and significantly longer total duration from vigorous flowering beginning to the end (total DBE). However, the vigorous flowering beginning time showed no significant differences and there was a slight decrease in daily DBE.
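As a rough illustration of the tracking step (the paper itself uses YOLO v5 detections, ResNet50 features, and DeepSORT for association), the sketch below links per-frame panicle detections into identical-panicle tracks by greedy IoU matching. The detection boxes are toy values and the matching rule is a simplification, not the authors' implementation.

from itertools import count

def iou(a, b):
    """Intersection-over-union of two boxes given as (x1, y1, x2, y2)."""
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    area = lambda r: (r[2] - r[0]) * (r[3] - r[1])
    union = area(a) + area(b) - inter
    return inter / union if union else 0.0

def track_panicles(frames, iou_thr=0.3):
    """Greedy IoU tracker: frames is a list of per-image detection lists.
    Returns {track_id: [frame indices where that panicle was seen]}."""
    next_id = count()
    tracks = {}    # track id -> last seen box
    history = {}   # track id -> frame indices
    for t, dets in enumerate(frames):
        unmatched = dict(tracks)
        for box in dets:
            best = max(unmatched, key=lambda i: iou(unmatched[i], box), default=None)
            if best is not None and iou(unmatched[best], box) >= iou_thr:
                tracks[best] = box
                history[best].append(t)
                del unmatched[best]
            else:
                i = next(next_id)
                tracks[i] = box
                history[i] = [t]
    return history

# Toy detections for three time points (boxes in pixels).
frames = [[(10, 10, 50, 60)], [(12, 11, 52, 62), (200, 40, 240, 90)], [(205, 42, 244, 92)]]
hist = track_panicles(frames)
print(len(hist), "panicles tracked:", hist)

Per-track frame indices such as these are what a flowering-duration analysis would then be computed from.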
\n\n\n
\n\n\n
Drone-Based Harvest Data Prediction Can Reduce On-Farm Food Loss and Improve Farmer Income. Wang, H.; Li, T.; Nishida, E.; Kato, Y.; Fukano, Y.; and Guo, W. Plant Phenomics, 5: 0086. September 2023.
\n
@article{wang_drone-based_2023,\n\ttitle = {Drone-{Based} {Harvest} {Data} {Prediction} {Can} {Reduce} {On}-{Farm} {Food} {Loss} and {Improve} {Farmer} {Income}},\n\tvolume = {5},\n\tissn = {2643-6515},\n\turl = {https://spj.science.org/doi/10.34133/plantphenomics.0086},\n\tdoi = {10.34133/plantphenomics.0086},\n\tabstract = {On-farm food loss (i.e., grade-out vegetables) is a difficult challenge in sustainable agricultural systems. The simplest method to reduce the number of grade-out vegetables is to monitor and predict the size of all individuals in the vegetable field and determine the optimal harvest date with the smallest grade-out number and highest profit, which is not cost-effective by conventional methods. Here, we developed a full pipeline to accurately estimate and predict every broccoli head size ( \n              n \n              {\\textgreater} 3,000) automatically and nondestructively using drone remote sensing and image analysis. The individual sizes were fed to the temperature-based growth model and predicted the optimal harvesting date. Two years of field experiments revealed that our pipeline successfully estimated and predicted the head size of all broccolis with high accuracy. We also found that a deviation of only 1 to 2 days from the optimal date can considerably increase grade-out and reduce farmer's profits. This is an unequivocal demonstration of the utility of these approaches to economic crop optimization and minimization of food losses.},\n\tlanguage = {en},\n\turldate = {2023-09-10},\n\tjournal = {Plant Phenomics},\n\tauthor = {Wang, Haozhou and Li, Tang and Nishida, Erika and Kato, Yoichiro and Fukano, Yuya and Guo, Wei},\n\tmonth = sep,\n\tyear = {2023},\n\tpages = {0086},\n}\n\n
\n
\n\n\n
On-farm food loss (i.e., grade-out vegetables) is a difficult challenge in sustainable agricultural systems. The simplest method to reduce the number of grade-out vegetables is to monitor and predict the size of all individuals in the vegetable field and determine the optimal harvest date with the smallest grade-out number and highest profit, which is not cost-effective by conventional methods. Here, we developed a full pipeline to accurately estimate and predict every broccoli head size (n > 3,000) automatically and nondestructively using drone remote sensing and image analysis. The individual sizes were fed to the temperature-based growth model and predicted the optimal harvesting date. Two years of field experiments revealed that our pipeline successfully estimated and predicted the head size of all broccolis with high accuracy. We also found that a deviation of only 1 to 2 days from the optimal date can considerably increase grade-out and reduce farmer's profits. This is an unequivocal demonstration of the utility of these approaches to economic crop optimization and minimization of food losses.
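The temperature-based growth model step can be sketched as a growing-degree-day projection: given a head's current diameter and a temperature forecast, find the first day on which the projected diameter reaches market size. The base temperature, growth rate per degree day, and target size below are placeholder constants, not the calibrated values from the study.

import numpy as np

def predict_harvest_day(diam_today_cm, daily_mean_temp_c,
                        target_cm=12.0, base_temp_c=4.0, cm_per_gdd=0.02):
    """Project the first day index at which a head reaches the target size,
    assuming growth proportional to accumulated growing degree days (GDD).
    target_cm, base_temp_c and cm_per_gdd are illustrative constants."""
    gdd = np.maximum(np.asarray(daily_mean_temp_c) - base_temp_c, 0.0)
    diam = diam_today_cm + cm_per_gdd * np.cumsum(gdd)
    reached = np.nonzero(diam >= target_cm)[0]
    return int(reached[0]) if reached.size else None

forecast = [15, 16, 14, 13, 17, 18, 16, 15, 14, 13]   # next 10 days, degrees C
sizes = [10.5, 11.2, 9.8]                             # current head diameters (cm)
print([predict_harvest_day(d, forecast) for d in sizes])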
\n\n\n
\n\n\n
Land Cover Mapping of Bengaluru's Urban and Surrounding Area with Spatiotemporal Open-source Remote Sensing Data. Grison, S.; Siddaganga, R.; Singh, R.; Hegde, S.; Brockmann, A.; Krishnan, S.; and Guo, W. Sensors and Materials, 35(11): 3829. November 2023.
\n
@article{grison_land_2023,\n\ttitle = {Land {Cover} {Mapping} of {Bengaluru}'s {Urban} and {Surrounding} {Area} with {Spatiotemporal} {Open}-source {Remote} {Sensing} {Data}},\n\tvolume = {35},\n\tissn = {0914-4935, 2435-0869},\n\turl = {https://sensors.myu-group.co.jp/article.php?ss=4627},\n\tdoi = {10.18494/SAM4627},\n\tlanguage = {en},\n\tnumber = {11},\n\turldate = {2023-11-29},\n\tjournal = {Sensors and Materials},\n\tauthor = {Grison, Sylvain and Siddaganga, Rajath and Singh, Renu and Hegde, Shrihari and Brockmann, Axel and Krishnan, Smitha and Guo, Wei},\n\tmonth = nov,\n\tyear = {2023},\n\tpages = {3829},\n}\n\n
\n
\n\n\n\n
\n\n\n
UAV-based individual Chinese cabbage weight prediction using multi-temporal data. Aguilar-Ariza, A.; Ishii, M.; Miyazaki, T.; Saito, A.; Khaing, H. P.; Phoo, H. W.; Kondo, T.; Fujiwara, T.; Guo, W.; and Kamiya, T. Scientific Reports, 13(1): 20122. November 2023.
\n
@article{aguilar-ariza_uav-based_2023,\n\ttitle = {{UAV}-based individual {Chinese} cabbage weight prediction using multi-temporal data},\n\tvolume = {13},\n\tissn = {2045-2322},\n\turl = {https://www.nature.com/articles/s41598-023-47431-y},\n\tdoi = {10.1038/s41598-023-47431-y},\n\tabstract = {Abstract\n            \n              The use of unmanned aerial vehicles (UAVs) has facilitated crop canopy monitoring, enabling yield prediction by integrating regression models. However, the application of UAV-based data to individual-level harvest weight prediction is limited by the effectiveness of obtaining individual features. In this study, we propose a method that automatically detects and extracts multitemporal individual plant features derived from UAV-based data to predict harvest weight. We acquired data from an experimental field sown with 1196 Chinese cabbage plants, using two cameras (RGB and multi-spectral) mounted on UAVs. First, we used three RGB orthomosaic images and an object detection algorithm to detect more than 95\\% of the individual plants. Next, we used feature selection methods and five different multi-temporal resolutions to predict individual plant weights, achieving a coefficient of determination (R\n              2\n              ) of 0.86 and a root mean square error (RMSE) of 436 g/plant. Furthermore, we achieved predictions with an R\n              2\n              greater than 0.72 and an RMSE less than 560 g/plant up to 53 days prior to harvest. These results demonstrate the feasibility of accurately predicting individual Chinese cabbage harvest weight using UAV-based data and the efficacy of utilizing multi-temporal features to predict plant weight more than one month prior to harvest.},\n\tlanguage = {en},\n\tnumber = {1},\n\turldate = {2023-11-30},\n\tjournal = {Scientific Reports},\n\tauthor = {Aguilar-Ariza, Andrés and Ishii, Masanori and Miyazaki, Toshio and Saito, Aika and Khaing, Hlaing Phyoe and Phoo, Hnin Wint and Kondo, Tomohiro and Fujiwara, Toru and Guo, Wei and Kamiya, Takehiro},\n\tmonth = nov,\n\tyear = {2023},\n\tpages = {20122},\n}\n\n
\n
\n\n\n
The use of unmanned aerial vehicles (UAVs) has facilitated crop canopy monitoring, enabling yield prediction by integrating regression models. However, the application of UAV-based data to individual-level harvest weight prediction is limited by the effectiveness of obtaining individual features. In this study, we propose a method that automatically detects and extracts multitemporal individual plant features derived from UAV-based data to predict harvest weight. We acquired data from an experimental field sown with 1196 Chinese cabbage plants, using two cameras (RGB and multi-spectral) mounted on UAVs. First, we used three RGB orthomosaic images and an object detection algorithm to detect more than 95% of the individual plants. Next, we used feature selection methods and five different multi-temporal resolutions to predict individual plant weights, achieving a coefficient of determination (R²) of 0.86 and a root mean square error (RMSE) of 436 g/plant. Furthermore, we achieved predictions with an R² greater than 0.72 and an RMSE less than 560 g/plant up to 53 days prior to harvest. These results demonstrate the feasibility of accurately predicting individual Chinese cabbage harvest weight using UAV-based data and the efficacy of utilizing multi-temporal features to predict plant weight more than one month prior to harvest.
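A minimal sketch of the modelling step on synthetic data: per-plant features from several flight dates are concatenated into one feature vector, fed to a regressor, and evaluated with R² and RMSE as in the abstract. The feature layout and the random-forest model are illustrative choices, not the paper's feature-selection pipeline.

import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)

# Synthetic stand-in data: 300 plants x (4 dates x 3 features per date),
# e.g. projected canopy area, mean NDVI, and plant height at each date.
n_plants, n_dates, n_feats = 300, 4, 3
X = rng.normal(size=(n_plants, n_dates * n_feats))
true_w = rng.normal(size=n_dates * n_feats)
y = 1500 + 80 * X @ true_w + rng.normal(scale=100, size=n_plants)   # harvest weight in grams (synthetic)

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=0)
model = RandomForestRegressor(n_estimators=200, random_state=0).fit(X_tr, y_tr)
pred = model.predict(X_te)
print(f"R2 = {r2_score(y_te, pred):.2f}, RMSE = {np.sqrt(mean_squared_error(y_te, pred)):.0f} g/plant")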
\n\n\n
\n\n\n
拡張する目で農業生産を支援 [Supporting agricultural production with an augmented eye]. 郭, 威. 弥生, 76: 2–3. 2023.
\n
@article{__2023-1,\n\ttitle = {拡張する目で農業生産を支援},\n\tvolume = {76},\n\turl = {https://www.a.u-tokyo.ac.jp/pr-yayoi/76/#page=3},\n\tabstract = {農作物観察は栽培管理の基本です。私たちは工学・情報科学と農学・植物科学を融合して、作物を見る人の目を拡張し凌駕することで作物育種や栽培管理技術にイノベーションを起こすための研究\nをしています。},\n\tjournal = {弥生},\n\tauthor = {郭, 威},\n\tyear = {2023},\n\tpages = {2--3},\n}\n\n
\n
\n\n\n
Observation of crops is the foundation of cultivation management. By fusing engineering and information science with agronomy and plant science, we conduct research that aims to extend, and ultimately surpass, the human eye that watches over crops, and thereby bring innovation to crop breeding and cultivation management technology.
\n\n\n
\n\n\n
スマート農業を身近に フィールドICT研究・実習教育の取り入れ [Bringing smart agriculture closer: incorporating field ICT research and practical training into education]. 郭, 威. Agrio, 477: 10–12. November 2023.
\n
@article{__2023,\n\ttitle = {スマート農業を身近に フィールドICT研究・実習教育の取り入れ},\n\tvolume = {477},\n\turl = {https://www.jiji.co.jp/service/agrio/},\n\tlanguage = {日本語},\n\tjournal = {Agrio},\n\tauthor = {郭, 威},\n\tmonth = nov,\n\tyear = {2023},\n\tpages = {10--12},\n}\n\n
\n
\n\n\n\n
\n\n\n
Development of a high-throughput field phenotyping rover optimized for size-limited breeding fields as open-source hardware. Kuroki, K.; Yan, K.; Iwata, H.; Shimizu, K. K.; Tameshige, T.; Nasuda, S.; and Guo, W. Breeding Science, 72(1): 66–74. March 2022.
\n
@article{kuroki_development_2022,\n\ttitle = {Development of a high-throughput field phenotyping rover optimized for size-limited breeding fields as open-source hardware},\n\tvolume = {72},\n\tissn = {1344-7610, 1347-3735},\n\turl = {https://www.jstage.jst.go.jp/article/jsbbs/72/1/72_21059/_article},\n\tdoi = {10.1270/jsbbs.21059},\n\tabstract = {Phenotyping is a critical process in plant breeding, especially when there is an increasing demand for streamlining a selection process in a breeding program. Since manual phenotyping has limited efficiency, high-throughput phenotyping methods are recently popularized owing to progress in sensor and image processing technologies. However, in a size-limited breeding field, which is common in Japan and other Asian countries, it is challenging to introduce large machinery in the field or fly unmanned aerial vehicles over the field. In this study, we developed a ground-based high-throughput field phenotyping rover that could be easily introduced to a field regardless of the scale and location of the field even without special facilities. We also made the field rover open-source hardware, making its system available to public for easy modification, so that anyone can build one for their own use at a low cost. The trial run of the field rover revealed that it allowed the collection of detailed remote-sensing images of plants and quantitative analyses based on the images. The results suggest that the field rover developed in this study could allow efficient phenotyping of plants especially in a small breeding field.},\n\tlanguage = {en},\n\tnumber = {1},\n\turldate = {2022-04-12},\n\tjournal = {Breeding Science},\n\tauthor = {Kuroki, Ken and Yan, Kai and Iwata, Hiroyoshi and Shimizu, Kentaro K. and Tameshige, Toshiaki and Nasuda, Shuhei and Guo, Wei},\n\tmonth = mar,\n\tyear = {2022},\n\tpages = {66--74},\n}\n\n
\n
\n\n\n
Phenotyping is a critical process in plant breeding, especially when there is an increasing demand for streamlining a selection process in a breeding program. Since manual phenotyping has limited efficiency, high-throughput phenotyping methods have recently become popular owing to progress in sensor and image processing technologies. However, in a size-limited breeding field, which is common in Japan and other Asian countries, it is challenging to introduce large machinery in the field or fly unmanned aerial vehicles over the field. In this study, we developed a ground-based high-throughput field phenotyping rover that could be easily introduced to a field regardless of the scale and location of the field even without special facilities. We also made the field rover open-source hardware, making its system available to the public for easy modification, so that anyone can build one for their own use at a low cost. The trial run of the field rover revealed that it allowed the collection of detailed remote-sensing images of plants and quantitative analyses based on the images. The results suggest that the field rover developed in this study could allow efficient phenotyping of plants especially in a small breeding field.
\n\n\n
\n\n\n
Deep-learning-based in-field citrus fruit detection and tracking. Zhang, W.; Wang, J.; Liu, Y.; Chen, K.; Li, H.; Duan, Y.; Wu, W.; Shi, Y.; and Guo, W. Horticulture Research, uhac003. February 2022.
\n
@article{zhang_deep-learning-based_2022,\n\ttitle = {Deep-learning-based in-field citrus fruit detection and tracking},\n\tissn = {2052-7276},\n\turl = {https://academic.oup.com/hr/advance-article/doi/10.1093/hr/uhac003/6526907},\n\tdoi = {10.1093/hr/uhac003},\n\tabstract = {Abstract\n            Fruit yield estimation is crucial to establish fruit harvesting and marketing strategies. Recently, computer vision and deep learning techniques have been used to estimate citrus fruit yield and have exhibited a notable fruit detection ability. However, computer-vision-based citrus fruit counting has two key limitations: inconsistent fruit detection accuracy and double-counting of the same fruit. Using oranges as the experimental material, this paper proposes a deep-learning-based orange counting algorithm using video sequences to help overcome these problems. The algorithm consists of two sub-algorithms, OrangeYolo for fruit detection and OrangeSort for fruit tracking. The OrangeYolo backbone network is partially based on the YOLOv3 algorithm and improved upon to detect small object fruits at multiple scales. The network structure was adjusted to detect small-scale targets while enabling multiscale target detection. A channel attention and spatial attention multiscale fusion module was introduced to fuse the semantic features of the deep network with the shallow textural detail features. OrangeYolo can reach mean Average Precision (mAP) to 0.957 in the citrus dataset, which is higher than the 0.905, 0.911, and 0.917 that the YOLOv3, YOLOv4 and YOLOv5 algorithms. OrangeSort was designed to alleviate the double-counting problem of occluded fruits. A specific tracking region counting strategy and tracking algorithm based on motion displacement estimation are established. Six video sequences, which were taken from two fields containing 22 trees, were used as a validation dataset. The proposed method showed better performance (Mean Absolute Error(MAE) = 0.081, Standard Deviation(SD) = 0.08) compared to video-based manual counting and demonstrated more accurate results compared with existing standard Sort and DeepSort (MAE = 0.45, 1.212; SD = 0.4741, 1.3975; respectively).},\n\tlanguage = {en},\n\turldate = {2022-04-12},\n\tjournal = {Horticulture Research},\n\tauthor = {Zhang, Wenli and Wang, Jiaqi and Liu, Yuxin and Chen, Kaizhen and Li, Huibin and Duan, Yulin and Wu, Wenbin and Shi, Yun and Guo, Wei},\n\tmonth = feb,\n\tyear = {2022},\n\tpages = {uhac003},\n}\n\n
\n
\n\n\n
Fruit yield estimation is crucial to establish fruit harvesting and marketing strategies. Recently, computer vision and deep learning techniques have been used to estimate citrus fruit yield and have exhibited a notable fruit detection ability. However, computer-vision-based citrus fruit counting has two key limitations: inconsistent fruit detection accuracy and double-counting of the same fruit. Using oranges as the experimental material, this paper proposes a deep-learning-based orange counting algorithm using video sequences to help overcome these problems. The algorithm consists of two sub-algorithms, OrangeYolo for fruit detection and OrangeSort for fruit tracking. The OrangeYolo backbone network is partially based on the YOLOv3 algorithm and improved upon to detect small object fruits at multiple scales. The network structure was adjusted to detect small-scale targets while enabling multiscale target detection. A channel attention and spatial attention multiscale fusion module was introduced to fuse the semantic features of the deep network with the shallow textural detail features. OrangeYolo reaches a mean Average Precision (mAP) of 0.957 on the citrus dataset, higher than the 0.905, 0.911, and 0.917 achieved by the YOLOv3, YOLOv4, and YOLOv5 algorithms. OrangeSort was designed to alleviate the double-counting problem of occluded fruits. A specific tracking-region counting strategy and a tracking algorithm based on motion displacement estimation were established. Six video sequences, which were taken from two fields containing 22 trees, were used as a validation dataset. The proposed method showed better performance (Mean Absolute Error (MAE) = 0.081, Standard Deviation (SD) = 0.08) compared to video-based manual counting and demonstrated more accurate results compared with the existing standard Sort and DeepSort (MAE = 0.45, 1.212; SD = 0.4741, 1.3975, respectively).
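The double-counting problem that OrangeSort addresses can be illustrated with a simplified counting rule: a tracked fruit is counted once, when its track first crosses a counting line inside the tracking region. The line position and the toy tracks below are assumptions for illustration; this is not the OrangeSort algorithm itself.

def count_line_crossings(tracks, line_x=320):
    """Count each tracked fruit once, when the x-coordinate of its centre
    moves across a vertical counting line (a simplified stand-in for a
    tracking-region counting strategy)."""
    counted = 0
    for centres in tracks.values():            # centres: list of (x, y) per frame
        xs = [c[0] for c in centres]
        crossed = any(a < line_x <= b or b < line_x <= a for a, b in zip(xs, xs[1:]))
        counted += int(crossed)
    return counted

tracks = {
    1: [(100, 50), (250, 52), (340, 55), (360, 57)],   # moves across the line: counted once
    2: [(300, 80), (330, 82), (310, 81), (335, 83)],   # re-crosses due to jitter: still counted once
}
print(count_line_crossings(tracks))   # -> 2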
\n\n\n
\n\n\n
How Useful Is Image-Based Active Learning for Plant Organ Segmentation? Rawat, S.; Chandra, A. L.; Desai, S. V.; Balasubramanian, V. N.; Ninomiya, S.; and Guo, W. Plant Phenomics, 2022: 1–11. February 2022.
\n
@article{rawat_how_2022,\n\ttitle = {How {Useful} {Is} {Image}-{Based} {Active} {Learning} for {Plant} {Organ} {Segmentation}?},\n\tvolume = {2022},\n\tissn = {2643-6515},\n\turl = {https://spj.sciencemag.org/journals/plantphenomics/2022/9795275/},\n\tdoi = {10.34133/2022/9795275},\n\tabstract = {Training deep learning models typically requires a huge amount of labeled data which is expensive to acquire, especially in dense prediction tasks such as semantic segmentation. Moreover, plant phenotyping datasets pose additional challenges of heavy occlusion and varied lighting conditions which makes annotations more time-consuming to obtain. Active learning helps in reducing the annotation cost by selecting samples for labeling which are most informative to the model, thus improving model performance with fewer annotations. Active learning for semantic segmentation has been well studied on datasets such as PASCAL VOC and Cityscapes. However, its effectiveness on plant datasets has not received much importance. To bridge this gap, we empirically study and benchmark the effectiveness of four uncertainty-based active learning strategies on three natural plant organ segmentation datasets. We also study their behaviour in response to variations in training configurations in terms of augmentations used, the scale of training images, active learning batch sizes, and train-validation set splits.},\n\tlanguage = {en},\n\turldate = {2022-04-12},\n\tjournal = {Plant Phenomics},\n\tauthor = {Rawat, Shivangana and Chandra, Akshay L. and Desai, Sai Vikas and Balasubramanian, Vineeth N. and Ninomiya, Seishi and Guo, Wei},\n\tmonth = feb,\n\tyear = {2022},\n\tpages = {1--11},\n}\n\n
\n
\n\n\n
\n Training deep learning models typically requires a huge amount of labeled data which is expensive to acquire, especially in dense prediction tasks such as semantic segmentation. Moreover, plant phenotyping datasets pose additional challenges of heavy occlusion and varied lighting conditions which makes annotations more time-consuming to obtain. Active learning helps in reducing the annotation cost by selecting samples for labeling which are most informative to the model, thus improving model performance with fewer annotations. Active learning for semantic segmentation has been well studied on datasets such as PASCAL VOC and Cityscapes. However, its effectiveness on plant datasets has not received much importance. To bridge this gap, we empirically study and benchmark the effectiveness of four uncertainty-based active learning strategies on three natural plant organ segmentation datasets. We also study their behaviour in response to variations in training configurations in terms of augmentations used, the scale of training images, active learning batch sizes, and train-validation set splits.\n
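A minimal sketch of one uncertainty-based acquisition strategy of the kind benchmarked here: rank unlabeled images by the mean per-pixel entropy of the model's softmax output and send the most uncertain ones for annotation. The toy probability maps are randomly generated, and the batch size and scoring details are illustrative assumptions.

import numpy as np

def mean_pixel_entropy(prob_map):
    """prob_map: array of shape (C, H, W) with per-pixel class probabilities."""
    eps = 1e-12
    ent = -(prob_map * np.log(prob_map + eps)).sum(axis=0)   # (H, W) per-pixel entropy
    return float(ent.mean())

def select_for_labeling(prob_maps, batch_size=2):
    """Return indices of the most uncertain images (highest mean entropy)."""
    scores = [mean_pixel_entropy(p) for p in prob_maps]
    return sorted(np.argsort(scores)[::-1][:batch_size].tolist())

rng = np.random.default_rng(1)
pool = []
for _ in range(5):
    logits = rng.normal(size=(3, 64, 64))                    # 3 classes, toy logits
    probs = np.exp(logits) / np.exp(logits).sum(axis=0, keepdims=True)
    pool.append(probs)
print(select_for_labeling(pool))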
\n\n\n
\n\n\n
Deep-Learning-Based Multispectral Image Reconstruction from Single Natural Color RGB Image—Enhancing UAV-Based Phenotyping. Zhao, J.; Kumar, A.; Banoth, B. N.; Marathi, B.; Rajalakshmi, P.; Rewald, B.; Ninomiya, S.; and Guo, W. Remote Sensing, 14(5): 1272. March 2022.
\n
@article{zhao_deep-learning-based_2022,\n\ttitle = {Deep-{Learning}-{Based} {Multispectral} {Image} {Reconstruction} from {Single} {Natural} {Color} {RGB} {Image}—{Enhancing} {UAV}-{Based} {Phenotyping}},\n\tvolume = {14},\n\tissn = {2072-4292},\n\turl = {https://www.mdpi.com/2072-4292/14/5/1272},\n\tdoi = {10.3390/rs14051272},\n\tabstract = {Multispectral images (MSIs) are valuable for precision agriculture due to the extra spectral information acquired compared to natural color RGB (ncRGB) images. In this paper, we thus aim to generate high spatial MSIs through a robust, deep-learning-based reconstruction method using ncRGB images. Using the data from the agronomic research trial for maize and breeding research trial for rice, we first reproduced ncRGB images from MSIs through a rendering model, Model-True to natural color image (Model-TN), which was built using a benchmark hyperspectral image dataset. Subsequently, an MSI reconstruction model, Model-Natural color to Multispectral image (Model-NM), was trained based on prepared ncRGB (ncRGB-Con) images and MSI pairs, ensuring the model can use widely available ncRGB images as input. The integrated loss function of mean relative absolute error (MRAEloss) and spectral information divergence (SIDloss) were most effective during the building of both models, while models using the MRAEloss function were more robust towards variability between growing seasons and species. The reliability of the reconstructed MSIs was demonstrated by high coefficients of determination compared to ground truth values, using the Normalized Difference Vegetation Index (NDVI) as an example. The advantages of using “reconstructed” NDVI over Triangular Greenness Index (TGI), as calculated directly from RGB images, were illustrated by their higher capabilities in differentiating three levels of irrigation treatments on maize plants. This study emphasizes that the performance of MSI reconstruction models could benefit from an optimized loss function and the intermediate step of ncRGB image preparation. The ability of the developed models to reconstruct high-quality MSIs from low-cost ncRGB images will, in particular, promote the application for plant phenotyping in precision agriculture.},\n\tlanguage = {en},\n\tnumber = {5},\n\turldate = {2022-04-12},\n\tjournal = {Remote Sensing},\n\tauthor = {Zhao, Jiangsan and Kumar, Ajay and Banoth, Balaji Naik and Marathi, Balram and Rajalakshmi, Pachamuthu and Rewald, Boris and Ninomiya, Seishi and Guo, Wei},\n\tmonth = mar,\n\tyear = {2022},\n\tpages = {1272},\n}\n\n
\n
\n\n\n
\n Multispectral images (MSIs) are valuable for precision agriculture due to the extra spectral information acquired compared to natural color RGB (ncRGB) images. In this paper, we thus aim to generate high spatial MSIs through a robust, deep-learning-based reconstruction method using ncRGB images. Using the data from the agronomic research trial for maize and breeding research trial for rice, we first reproduced ncRGB images from MSIs through a rendering model, Model-True to natural color image (Model-TN), which was built using a benchmark hyperspectral image dataset. Subsequently, an MSI reconstruction model, Model-Natural color to Multispectral image (Model-NM), was trained based on prepared ncRGB (ncRGB-Con) images and MSI pairs, ensuring the model can use widely available ncRGB images as input. The integrated loss function of mean relative absolute error (MRAEloss) and spectral information divergence (SIDloss) were most effective during the building of both models, while models using the MRAEloss function were more robust towards variability between growing seasons and species. The reliability of the reconstructed MSIs was demonstrated by high coefficients of determination compared to ground truth values, using the Normalized Difference Vegetation Index (NDVI) as an example. The advantages of using “reconstructed” NDVI over Triangular Greenness Index (TGI), as calculated directly from RGB images, were illustrated by their higher capabilities in differentiating three levels of irrigation treatments on maize plants. This study emphasizes that the performance of MSI reconstruction models could benefit from an optimized loss function and the intermediate step of ncRGB image preparation. The ability of the developed models to reconstruct high-quality MSIs from low-cost ncRGB images will, in particular, promote the application for plant phenotyping in precision agriculture.\n
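The two loss terms named in the abstract are standard quantities and can be written down directly; the sketch below computes mean relative absolute error (MRAE) and spectral information divergence (SID) on a toy multispectral image. How the paper weights and combines the two terms is not reproduced here.

import numpy as np

def mrae(pred, true, eps=1e-8):
    """Mean relative absolute error between two spectral images."""
    return float(np.mean(np.abs(pred - true) / (np.abs(true) + eps)))

def sid(pred, true, eps=1e-8):
    """Spectral information divergence, averaged over pixels. Spectra are
    normalised to probability distributions along the band axis."""
    p = pred / (pred.sum(axis=-1, keepdims=True) + eps) + eps
    q = true / (true.sum(axis=-1, keepdims=True) + eps) + eps
    return float(np.mean(np.sum(p * np.log(p / q) + q * np.log(q / p), axis=-1)))

rng = np.random.default_rng(0)
true_msi = rng.uniform(0.1, 1.0, size=(32, 32, 5))                               # toy 5-band image
pred_msi = np.clip(true_msi + rng.normal(scale=0.02, size=true_msi.shape), 1e-3, None)
print(f"MRAE = {mrae(pred_msi, true_msi):.4f}, SID = {sid(pred_msi, true_msi):.6f}")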
\n\n\n
\n\n\n
High-throughput field crop phenotyping: current status and challenges. Ninomiya, S. Breeding Science, 72(1): 3–18. December 2022.
\n
@article{ninomiya_high-throughput_2022,\n\ttitle = {High-throughput field crop phenotyping: current status and challenges},\n\tvolume = {72},\n\tissn = {1344-7610, 1347-3735},\n\tshorttitle = {High-throughput field crop phenotyping},\n\turl = {https://www.jstage.jst.go.jp/article/jsbbs/72/1/72_21069/_article},\n\tdoi = {10.1270/jsbbs.21069},\n\tabstract = {In contrast to the rapid advances made in plant genotyping, plant phenotyping is considered a bottleneck in plant science. This has promoted high-throughput plant phenotyping (HTP) studies, resulting in an exponential increase in phenotyping-related publications. The development of HTP was originally intended for use as indoor HTP technologies for model plant species under controlled environments. However, this subsequently shifted to HTP for use in crops in fields. Although HTP in fields is much more difficult to conduct due to unstable environmental conditions compared to HTP in controlled environments, recent advances in HTP technology have allowed these difficulties to be overcome, allowing for rapid, efficient, non-destructive, non-invasive, quantitative, repeatable, and objective phenotyping. Recent HTP developments have been accelerated by the advances in data analysis, sensors, and robot technologies, including machine learning, image analysis, three dimensional (3D) reconstruction, image sensors, laser sensors, environmental sensors, and drones, along with high-speed computational resources. This article provides an overview of recent HTP technologies, focusing mainly on canopy-based phenotypes of major crops, such as canopy height, canopy coverage, canopy biomass, and canopy stressed appearance, in addition to crop organ detection and counting in the fields. Current topics in field HTP are also presented, followed by a discussion on the low rates of adoption of HTP in practical breeding programs.},\n\tlanguage = {en},\n\tnumber = {1},\n\turldate = {2022-04-12},\n\tjournal = {Breeding Science},\n\tauthor = {Ninomiya, Seishi},\n\tmonth = dec,\n\tyear = {2022},\n\tpages = {3--18},\n}\n\n
\n
\n\n\n
\n In contrast to the rapid advances made in plant genotyping, plant phenotyping is considered a bottleneck in plant science. This has promoted high-throughput plant phenotyping (HTP) studies, resulting in an exponential increase in phenotyping-related publications. The development of HTP was originally intended for use as indoor HTP technologies for model plant species under controlled environments. However, this subsequently shifted to HTP for use in crops in fields. Although HTP in fields is much more difficult to conduct due to unstable environmental conditions compared to HTP in controlled environments, recent advances in HTP technology have allowed these difficulties to be overcome, allowing for rapid, efficient, non-destructive, non-invasive, quantitative, repeatable, and objective phenotyping. Recent HTP developments have been accelerated by the advances in data analysis, sensors, and robot technologies, including machine learning, image analysis, three dimensional (3D) reconstruction, image sensors, laser sensors, environmental sensors, and drones, along with high-speed computational resources. This article provides an overview of recent HTP technologies, focusing mainly on canopy-based phenotypes of major crops, such as canopy height, canopy coverage, canopy biomass, and canopy stressed appearance, in addition to crop organ detection and counting in the fields. Current topics in field HTP are also presented, followed by a discussion on the low rates of adoption of HTP in practical breeding programs.\n
\n\n\n
\n\n\n
Endmember-Assisted Camera Response Function Learning, Toward Improving Hyperspectral Image Super-Resolution Performance. Zhao, J.; Qu, Y.; Ninomiya, S.; and Guo, W. IEEE Transactions on Geoscience and Remote Sensing, 60: 1–14. June 2022.
\n
@article{zhao_endmember-assisted_2022,\n\ttitle = {Endmember-{Assisted} {Camera} {Response} {Function} {Learning}, {Toward} {Improving} {Hyperspectral} {Image} {Super}-{Resolution} {Performance}},\n\tvolume = {60},\n\tissn = {1558-0644},\n\turl = {https://ieeexplore.ieee.org/document/9794687},\n\tdoi = {10.1109/TGRS.2022.3182425},\n\tabstract = {The camera response function (CRF) that projects hyperspectral radiance to the corresponding RGB images is important for most hyperspectral image super-resolution (HSI-SR) models. In contrast to most studies that focus on improving HSI-SR performance through new architectures, we aim to prevent the model performance drop by learning the CRF of any given HSIs and RGB image from the same scene in an unsupervised manner, independent of the HSI-SR network. Accordingly, we first decompose the given RGB image into endmembers and an abundance map using the Dirichlet autoencoder architecture. Thereafter, a linear CRF learning network is optimized to project the reference HSIs to the RGB image that can be similarly decomposed like the given RGB, assuming that objects in both images share the same endmembers and abundance map. The quality of the RGB images generated from the learned CRFs is compared with that of the corresponding ground-truth images based on the true CRFs of two consumer-level cameras Nikon 700D and Canon 500D. We demonstrate that the effectively learned CRFs can prevent significant performance drop in three popular HSI-SR models on RGB images from different categories of standard datasets of CAVE, ICVL, Chikusei, Cuprite, Salinas, and KSC. The successfully learned CRF using the method proposed in this study would largely promote a wider implementation of HSI-SR models since tremendous performance drop can be prevented practically.},\n\tjournal = {IEEE Transactions on Geoscience and Remote Sensing},\n\tauthor = {Zhao, Jiangsan and Qu, Ying and Ninomiya, Seishi and Guo, Wei},\n\tmonth = jun,\n\tyear = {2022},\n\tkeywords = {Abundance map, Cameras, Hyperspectral imaging, Image color analysis, Image reconstruction, Imaging, Spatial resolution, Superresolution, camera response function (CRF), endmember, hyperspectral image (HSI), super-resolution, unsupervised deep learning},\n\tpages = {1--14},\n}\n\n
\n
\n\n\n
\n The camera response function (CRF) that projects hyperspectral radiance to the corresponding RGB images is important for most hyperspectral image super-resolution (HSI-SR) models. In contrast to most studies that focus on improving HSI-SR performance through new architectures, we aim to prevent the model performance drop by learning the CRF of any given HSIs and RGB image from the same scene in an unsupervised manner, independent of the HSI-SR network. Accordingly, we first decompose the given RGB image into endmembers and an abundance map using the Dirichlet autoencoder architecture. Thereafter, a linear CRF learning network is optimized to project the reference HSIs to the RGB image that can be similarly decomposed like the given RGB, assuming that objects in both images share the same endmembers and abundance map. The quality of the RGB images generated from the learned CRFs is compared with that of the corresponding ground-truth images based on the true CRFs of two consumer-level cameras Nikon 700D and Canon 500D. We demonstrate that the effectively learned CRFs can prevent significant performance drop in three popular HSI-SR models on RGB images from different categories of standard datasets of CAVE, ICVL, Chikusei, Cuprite, Salinas, and KSC. The successfully learned CRF using the method proposed in this study would largely promote a wider implementation of HSI-SR models since tremendous performance drop can be prevented practically.\n
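As background for the linear CRF assumption, the sketch below recovers a bands-by-3 projection matrix from a co-registered hyperspectral cube and RGB image by ordinary least squares on synthetic data. This supervised toy version deliberately sidesteps the paper's actual contribution, which is learning the CRF without such supervision via endmember and abundance decomposition.

import numpy as np

def fit_linear_crf(hsi, rgb):
    """Estimate a (bands x 3) matrix M mapping hyperspectral radiance to RGB
    by ordinary least squares over all pixels (supervised toy version)."""
    n_bands = hsi.shape[-1]
    X = hsi.reshape(-1, n_bands)
    Y = rgb.reshape(-1, 3)
    M, *_ = np.linalg.lstsq(X, Y, rcond=None)
    return M

rng = np.random.default_rng(0)
true_crf = rng.uniform(0, 1, size=(31, 3))                  # toy 31-band response
hsi = rng.uniform(0, 1, size=(64, 64, 31))
rgb = (hsi.reshape(-1, 31) @ true_crf).reshape(64, 64, 3)
rgb = rgb + rng.normal(scale=1e-3, size=rgb.shape)          # small sensor noise
M = fit_linear_crf(hsi, rgb)
print("max abs error in recovered CRF:", float(np.max(np.abs(M - true_crf))))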
\n\n\n
\n\n\n
Periodically taken photographs reveal the effect of pollinator insects on seed set in lotus flowers. Nagai, M.; Higuchi, Y.; Ishikawa, Y.; Guo, W.; Fukatsu, T.; Baba, Y. G.; and Takada, M. B. Scientific Reports, 12(1): 11051. July 2022.
\n
@article{nagai_periodically_2022,\n\ttitle = {Periodically taken photographs reveal the effect of pollinator insects on seed set in lotus flowers},\n\tvolume = {12},\n\tcopyright = {2022 The Author(s)},\n\tissn = {2045-2322},\n\turl = {https://www.nature.com/articles/s41598-022-15090-0},\n\tdoi = {10.1038/s41598-022-15090-0},\n\tabstract = {Understanding of pollination systems is an important topic for evolutionary ecology, food production, and biodiversity conservation. However, it is difficult to grasp the whole picture of an individual system, because the activity of pollinators fluctuates depending on the flowering period and time of day. In order to reveal effective pollinator taxa and timing of visitation to the reproductive success of plants under the complex biological interactions and fluctuating abiotic factors, we developed an automatic system to take photographs at 5-s intervals to get near-complete flower visitation by pollinators during the entire flowering period of selected flowers of Nelumbo nucifera and track the reproductive success of the same flowers until fruiting. Bee visits during the early morning hours of 05:00–07:59 on the second day of flowering under optimal temperatures with no rainfall or strong winds contributed strongly to seed set, with possible indirect negative effects by predators of the pollinators. Our results indicate the availability of periodic and consecutive photography system in clarifying the plant-pollinator interaction and its consequence to reproductive success of the plant. Further development is required to build a monitoring system to collect higher-resolution time-lapse images and automatically identify visiting insect species in the natural environment.},\n\tlanguage = {en},\n\tnumber = {1},\n\turldate = {2022-07-11},\n\tjournal = {Scientific Reports},\n\tauthor = {Nagai, Mihoko and Higuchi, Yohei and Ishikawa, Yusei and Guo, Wei and Fukatsu, Tokihiro and Baba, Yuki G. and Takada, Mayura B.},\n\tmonth = jul,\n\tyear = {2022},\n\tkeywords = {Ecological networks, Pollination},\n\tpages = {11051},\n}\n\n
\n
\n\n\n
\n Understanding of pollination systems is an important topic for evolutionary ecology, food production, and biodiversity conservation. However, it is difficult to grasp the whole picture of an individual system, because the activity of pollinators fluctuates depending on the flowering period and time of day. In order to reveal effective pollinator taxa and timing of visitation to the reproductive success of plants under the complex biological interactions and fluctuating abiotic factors, we developed an automatic system to take photographs at 5-s intervals to get near-complete flower visitation by pollinators during the entire flowering period of selected flowers of Nelumbo nucifera and track the reproductive success of the same flowers until fruiting. Bee visits during the early morning hours of 05:00–07:59 on the second day of flowering under optimal temperatures with no rainfall or strong winds contributed strongly to seed set, with possible indirect negative effects by predators of the pollinators. Our results indicate the availability of periodic and consecutive photography system in clarifying the plant-pollinator interaction and its consequence to reproductive success of the plant. Further development is required to build a monitoring system to collect higher-resolution time-lapse images and automatically identify visiting insect species in the natural environment.\n
\n\n\n
\n\n\n
Image-Based Phenotyping for Non-Destructive In Situ Rice (Oryza sativa L.) Tiller Counting Using Proximal Sensing. Yamagishi, Y.; Kato, Y.; Ninomiya, S.; and Guo, W. Sensors, 22(15). July 2022.
\n
@article{yamagishi_image-based_2022,\n\ttitle = {Image-{Based} {Phenotyping} for {Non}-{Destructive} {In} {Situ} {Rice} ({Oryza} sativa {L}.) {Tiller} {Counting} {Using} {Proximal} {Sensing}},\n\tvolume = {22},\n\tissn = {1424-8220},\n\turl = {https://www.mdpi.com/1424-8220/22/15/5547},\n\tdoi = {10.3390/s22155547},\n\tabstract = {The increase in the number of tillers of rice significantly affects grain yield. However, this is measured only by the manual counting of emerging tillers, where the most common method is to count by hand touching. This study develops an efficient, non-destructive method for estimating the number of tillers during the vegetative and reproductive stages under flooded conditions. Unlike popular deep-learning-based approaches requiring training data and computational resources, we propose a simple image-processing pipeline following the empirical principles of synchronously emerging leaves and tillers in rice morphogenesis. Field images were taken by an unmanned aerial vehicle at a very low flying height for UAV imaging\\&mdash;1.5 to 3 m above the rice canopy. Subsequently, the proposed image-processing pipeline was used, which includes binarization, skeletonization, and leaf-tip detection, to count the number of long-growing leaves. The tiller number was estimated from the number of long-growing leaves. The estimated tiller number in a 1.1 m \\&times; 1.1 m area is significantly correlated with the actual number of tillers, with 60\\% of hills having an error of less than \\&plusmn;3 tillers. This study demonstrates the potential of the proposed image-sensing-based tiller-counting method to help agronomists with efficient, non-destructive field phenotyping.},\n\tnumber = {15},\n\tjournal = {Sensors},\n\tauthor = {Yamagishi, Yuki and Kato, Yoichiro and Ninomiya, Seishi and Guo, Wei},\n\tmonth = jul,\n\tyear = {2022},\n}\n\n
\n
\n\n\n
\n The increase in the number of tillers of rice significantly affects grain yield. However, this is measured only by the manual counting of emerging tillers, where the most common method is to count by hand touching. This study develops an efficient, non-destructive method for estimating the number of tillers during the vegetative and reproductive stages under flooded conditions. Unlike popular deep-learning-based approaches requiring training data and computational resources, we propose a simple image-processing pipeline following the empirical principles of synchronously emerging leaves and tillers in rice morphogenesis. Field images were taken by an unmanned aerial vehicle at a very low flying height for UAV imaging—1.5 to 3 m above the rice canopy. Subsequently, the proposed image-processing pipeline was used, which includes binarization, skeletonization, and leaf-tip detection, to count the number of long-growing leaves. The tiller number was estimated from the number of long-growing leaves. The estimated tiller number in a 1.1 m × 1.1 m area is significantly correlated with the actual number of tillers, with 60% of hills having an error of less than ±3 tillers. This study demonstrates the potential of the proposed image-sensing-based tiller-counting method to help agronomists with efficient, non-destructive field phenotyping.\n
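The binarization, skeletonization, and leaf-tip detection chain lends itself to a compact sketch: skeletonize a binary plant mask and count skeleton endpoints (pixels with exactly one neighbour) as candidate leaf tips. The toy "plant" below is three drawn lines; thresholding of real UAV images and the leaf-to-tiller conversion are omitted.

import numpy as np
from scipy.ndimage import convolve
from skimage.draw import line
from skimage.morphology import skeletonize

def count_leaf_tips(binary_mask):
    """Skeletonise a binary plant mask and count skeleton endpoints
    (pixels with exactly one 8-connected skeleton neighbour)."""
    skel = skeletonize(binary_mask).astype(np.uint8)
    neighbours = convolve(skel, np.ones((3, 3), dtype=np.uint8), mode="constant") - skel
    return int(np.sum((skel == 1) & (neighbours == 1)))

# Toy "plant": three straight leaves radiating from one point.
mask = np.zeros((100, 100), dtype=bool)
for r2, c2 in [(10, 20), (15, 85), (90, 70)]:
    rr, cc = line(50, 50, r2, c2)
    mask[rr, cc] = True
print(count_leaf_tips(mask))   # the three far line ends are detected as tips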
\n\n\n
\n\n\n
EasyDAM_V2: Efficient Data Labeling Method for Multishape, Cross-Species Fruit Detection. Zhang, W.; Chen, K.; Zheng, C.; Liu, Y.; and Guo, W. Plant Phenomics, 2022. September 2022.
\n
@article{zhang_easydam_v2_2022,\n\ttitle = {{EasyDAM}\\_V2: {Efficient} {Data} {Labeling} {Method} for {Multishape}, {Cross}-{Species} {Fruit} {Detection}},\n\tvolume = {2022},\n\tshorttitle = {{EasyDAM}\\_V2},\n\turl = {https://spj.sciencemag.org/journals/plantphenomics/2022/9761674/},\n\tdoi = {10.34133/2022/9761674},\n\tabstract = {In modern smart orchards, fruit detection models based on deep learning require expensive dataset labeling work to support the construction of detection models, resulting in high model application costs. Our previous work combined generative adversarial networks (GANs) and pseudolabeling methods to transfer labels from one specie to another to save labeling costs. However, only the color and texture features of images can be migrated, which still needs improvement in the accuracy of the data labeling. Therefore, this study proposes an EasyDAM\\_V2 model as an improved data labeling method for multishape and cross-species fruit detection. First, an image translation network named the Across-CycleGAN is proposed to generate fruit images from the source domain (fruit image with labels) to the target domain (fruit image without labels) even with partial shape differences. Then, a pseudolabel adaptive threshold selection strategy was designed to adjust the confidence threshold of the fruit detection model adaptively and dynamically update the pseudolabel to generate labels for images from the unlabeled target domain. In this paper, we use a labeled orange dataset as the source domain, and a pitaya, a mango dataset as the target domain, to evaluate the performance of the proposed method. The results showed that the average labeling precision values of the pitaya and mango datasets were 82.1\\&\\#x0025; and 85.0\\&\\#x0025;, respectively. Therefore, the proposed EasyDAM\\_V2 model is proven to be used for label transfer of cross-species fruit even with partial shape differences to reduce the cost of data labeling.},\n\tlanguage = {en},\n\turldate = {2022-09-13},\n\tjournal = {Plant Phenomics},\n\tauthor = {Zhang, Wenli and Chen, Kaizhen and Zheng, Chao and Liu, Yuxin and Guo, Wei},\n\tmonth = sep,\n\tyear = {2022},\n}\n\n
\n
\n\n\n
In modern smart orchards, fruit detection models based on deep learning require expensive dataset labeling work to support the construction of detection models, resulting in high model application costs. Our previous work combined generative adversarial networks (GANs) and pseudolabeling methods to transfer labels from one species to another to save labeling costs. However, only the color and texture features of images can be migrated, which still needs improvement in the accuracy of the data labeling. Therefore, this study proposes an EasyDAM_V2 model as an improved data labeling method for multishape and cross-species fruit detection. First, an image translation network named the Across-CycleGAN is proposed to generate fruit images from the source domain (fruit image with labels) to the target domain (fruit image without labels) even with partial shape differences. Then, a pseudolabel adaptive threshold selection strategy was designed to adjust the confidence threshold of the fruit detection model adaptively and dynamically update the pseudolabel to generate labels for images from the unlabeled target domain. In this paper, we use a labeled orange dataset as the source domain, and pitaya and mango datasets as the target domain, to evaluate the performance of the proposed method. The results showed that the average labeling precision values of the pitaya and mango datasets were 82.1% and 85.0%, respectively. Therefore, the proposed EasyDAM_V2 model is proven to be usable for label transfer of cross-species fruit even with partial shape differences, reducing the cost of data labeling.
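The pseudolabel adaptive threshold idea can be approximated by a classic iterative threshold on the detector's confidence scores: alternately split the scores at the current threshold and move the threshold to the midpoint of the two group means. This ISODATA-style rule and the toy score distribution are stand-ins, not the EasyDAM_V2 strategy.

import numpy as np

def adaptive_confidence_threshold(scores, init=0.5, tol=1e-3, max_iter=100):
    """Iterative threshold selection on detection confidences: repeatedly set
    the threshold to the midpoint of the mean confidence of the accepted and
    rejected groups until it stabilises."""
    scores = np.asarray(scores, dtype=float)
    thr = init
    for _ in range(max_iter):
        hi, lo = scores[scores >= thr], scores[scores < thr]
        if hi.size == 0 or lo.size == 0:
            break
        new_thr = 0.5 * (hi.mean() + lo.mean())
        if abs(new_thr - thr) < tol:
            thr = new_thr
            break
        thr = new_thr
    return float(thr)

rng = np.random.default_rng(0)
# Toy confidences: a cluster of confident detections plus a cluster of background hits.
scores = np.concatenate([rng.normal(0.85, 0.05, 80), rng.normal(0.30, 0.10, 120)])
thr = adaptive_confidence_threshold(scores)
pseudo_labels = scores[scores >= thr]
print(f"threshold = {thr:.2f}, kept {pseudo_labels.size} pseudo-labelled boxes")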
\n\n\n
\n\n\n
Augmentation Method for High Intra-Class Variation Data in Apple Detection. Li, H.; Guo, W.; Lu, G.; and Shi, Y. Sensors, 22(17): 6325. August 2022.
\n
@article{li_augmentation_2022,\n\ttitle = {Augmentation {Method} for {High} {Intra}-{Class} {Variation} {Data} in {Apple} {Detection}},\n\tvolume = {22},\n\tissn = {1424-8220},\n\turl = {https://www.mdpi.com/1424-8220/22/17/6325},\n\tdoi = {10.3390/s22176325},\n\tabstract = {Deep learning is widely used in modern orchard production for various inspection missions, which helps improve the efficiency of orchard operations. In the mission of visual detection during fruit picking, most current lightweight detection models are not yet effective enough to detect multi-type occlusion targets, severely affecting automated fruit-picking efficiency. This study addresses this problem by proposing the pioneering design of a multi-type occlusion apple dataset and an augmentation method of data balance. We divided apple occlusion into eight types and used the proposed method to balance the number of annotation boxes for multi-type occlusion apple targets. Finally, a validation experiment was carried out using five popular lightweight object detection models: yolox-s, yolov5-s, yolov4-s, yolov3-tiny, and efficidentdet-d0. The results show that, using the proposed augmentation method, the average detection precision of the five popular lightweight object detection models improved significantly. Specifically, the precision increased from 0.894 to 0.974, recall increased from 0.845 to 0.972, and mAP0.5 increased from 0.982 to 0.919 for yolox-s. This implies that the proposed augmentation method shows great potential for different fruit detection missions in future orchard applications.},\n\tlanguage = {en},\n\tnumber = {17},\n\turldate = {2024-01-24},\n\tjournal = {Sensors},\n\tauthor = {Li, Huibin and Guo, Wei and Lu, Guowen and Shi, Yun},\n\tmonth = aug,\n\tyear = {2022},\n\tpages = {6325},\n}\n\n
\n
\n\n\n
Deep learning is widely used in modern orchard production for various inspection missions, which helps improve the efficiency of orchard operations. In the mission of visual detection during fruit picking, most current lightweight detection models are not yet effective enough to detect multi-type occlusion targets, severely affecting automated fruit-picking efficiency. This study addresses this problem by proposing the pioneering design of a multi-type occlusion apple dataset and an augmentation method of data balance. We divided apple occlusion into eight types and used the proposed method to balance the number of annotation boxes for multi-type occlusion apple targets. Finally, a validation experiment was carried out using five popular lightweight object detection models: yolox-s, yolov5-s, yolov4-s, yolov3-tiny, and efficientdet-d0. The results show that, using the proposed augmentation method, the average detection precision of the five popular lightweight object detection models improved significantly. Specifically, the precision increased from 0.894 to 0.974, recall increased from 0.845 to 0.972, and mAP0.5 increased from 0.982 to 0.919 for yolox-s. This implies that the proposed augmentation method shows great potential for different fruit detection missions in future orchard applications.
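The data-balance idea, reduced to its simplest form: duplicate annotation boxes of under-represented occlusion types until every type matches the most frequent one. The occlusion labels and toy annotations below are invented, and plain duplication is a simplification of the paper's augmentation method.

import random
from collections import Counter

def balance_by_occlusion(annotations, seed=0):
    """Oversample annotation boxes so that every occlusion type ends up with
    as many boxes as the most frequent type (duplication-based balancing)."""
    rng = random.Random(seed)
    counts = Counter(a["occlusion"] for a in annotations)
    target = max(counts.values())
    balanced = list(annotations)
    for occ, n in counts.items():
        pool = [a for a in annotations if a["occlusion"] == occ]
        balanced += [rng.choice(pool) for _ in range(target - n)]
    return balanced

annos = ([{"occlusion": "none", "box": (0, 0, 10, 10)}] * 50
         + [{"occlusion": "leaf", "box": (5, 5, 15, 15)}] * 20
         + [{"occlusion": "branch", "box": (8, 2, 18, 12)}] * 5)
print(Counter(a["occlusion"] for a in balance_by_occlusion(annos)))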
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n 地上ロボットを用いた植物フィルードフェノタイピング技術.\n \n \n \n \n\n\n \n 郭, 威\n\n\n \n\n\n\n JATAFFジャーナル, 10(12): 24–28. December 2022.\n \n\n\n\n
\n\n\n\n \n \n \"地上ロボットを用いた植物フィルードフェノタイピング技術Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 19 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{__2022,\n\ttitle = {地上ロボットを用いた植物フィルードフェノタイピング技術},\n\tvolume = {10},\n\turl = {https://www.jataff.or.jp/books/order/journal/yousi/JATAFFj1012.htm#10},\n\tabstract = {フィルードにおける植物の生育状態を測定するフェノタイピング作業の効率化は、ICT、AI、ロボティクス技術の活用を期待され、技術の開発が急速に進でいる。特に、IOTやドローンなど近接リモートセンシング技術による様々な視点から圃場データの効率的取得が可能となり、そのデータを解析し、フェノタイピングするアルゴリズムの開発も数多く報告されている。本論文は、人の目線に近い視点からのフェノタイピングを焦点にし、圃場で使える地上ロボット技術の現状とその課題、育種現場への応用と将来への展望を述べる。},\n\tlanguage = {日本語},\n\tnumber = {12},\n\tjournal = {JATAFFジャーナル},\n\tauthor = {郭, 威},\n\tmonth = dec,\n\tyear = {2022},\n\tpages = {24--28},\n}\n\n
\n
\n\n\n
\n Improving the efficiency of field phenotyping, which measures the growth status of plants, is expected to benefit from ICT, AI, and robotics, and such technologies are developing rapidly. In particular, proximal remote-sensing technologies such as IoT devices and drones now enable efficient acquisition of field data from various viewpoints, and many algorithms for analyzing these data for phenotyping have been reported. This paper focuses on phenotyping from a viewpoint close to human eye level and describes the current state of ground-robot technologies usable in the field, their challenges, their application to breeding programs, and future prospects.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust Surface Reconstruction of Plant Leaves from 3D Point Clouds.\n \n \n \n \n\n\n \n Ando, R.; Ozasa, Y.; and Guo, W.\n\n\n \n\n\n\n Plant Phenomics, 2021: 1–15. April 2021.\n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{ando_robust_2021,\n\ttitle = {Robust {Surface} {Reconstruction} of {Plant} {Leaves} from {3D} {Point} {Clouds}},\n\tvolume = {2021},\n\tissn = {2643-6515},\n\turl = {https://spj.sciencemag.org/journals/plantphenomics/2021/3184185/},\n\tdoi = {10.34133/2021/3184185},\n\tabstract = {The automation of plant phenotyping using 3D imaging techniques is indispensable. However, conventional methods for reconstructing the leaf surface from 3D point clouds have a trade-off between the accuracy of leaf surface reconstruction and the method’s robustness against noise and missing points. To mitigate this trade-off, we developed a leaf surface reconstruction method that reduces the effects of noise and missing points while maintaining surface reconstruction accuracy by capturing two components of the leaf (the shape and distortion of that shape) separately using leaf-specific properties. This separation simplifies leaf surface reconstruction compared with conventional methods while increasing the robustness against noise and missing points. To evaluate the proposed method, we reconstructed the leaf surfaces from 3D point clouds of leaves acquired from two crop species (soybean and sugar beet) and compared the results with those of conventional methods. The result showed that the proposed method robustly reconstructed the leaf surfaces, despite the noise and missing points for two different leaf shapes. To evaluate the stability of the leaf surface reconstructions, we also calculated the leaf surface areas for 14 consecutive days of the target leaves. The result derived from the proposed method showed less variation of values and fewer outliers compared with the conventional methods.},\n\tlanguage = {en},\n\turldate = {2022-04-12},\n\tjournal = {Plant Phenomics},\n\tauthor = {Ando, Ryuhei and Ozasa, Yuko and Guo, Wei},\n\tmonth = apr,\n\tyear = {2021},\n\tpages = {1--15},\n}\n\n
\n
\n\n\n
\n The automation of plant phenotyping using 3D imaging techniques is indispensable. However, conventional methods for reconstructing the leaf surface from 3D point clouds have a trade-off between the accuracy of leaf surface reconstruction and the method’s robustness against noise and missing points. To mitigate this trade-off, we developed a leaf surface reconstruction method that reduces the effects of noise and missing points while maintaining surface reconstruction accuracy by capturing two components of the leaf (the shape and distortion of that shape) separately using leaf-specific properties. This separation simplifies leaf surface reconstruction compared with conventional methods while increasing the robustness against noise and missing points. To evaluate the proposed method, we reconstructed the leaf surfaces from 3D point clouds of leaves acquired from two crop species (soybean and sugar beet) and compared the results with those of conventional methods. The result showed that the proposed method robustly reconstructed the leaf surfaces, despite the noise and missing points for two different leaf shapes. To evaluate the stability of the leaf surface reconstructions, we also calculated the leaf surface areas for 14 consecutive days of the target leaves. The result derived from the proposed method showed less variation of values and fewer outliers compared with the conventional methods.\n
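The paper's shape/distortion decomposition is not reproduced here, but the general idea of fitting a smooth surface to a noisy leaf point cloud and integrating its area can be sketched with plain NumPy. The quadratic model, the toy data, and the rectangular footprint are illustrative assumptions only.

import numpy as np

def fit_quadratic_surface(points):
    """points: (N, 3) array. Returns coefficients of
    z ~ a + b*x + c*y + d*x^2 + e*x*y + f*y^2 fitted by least squares."""
    x, y, z = points[:, 0], points[:, 1], points[:, 2]
    A = np.column_stack([np.ones_like(x), x, y, x**2, x * y, y**2])
    coef, *_ = np.linalg.lstsq(A, z, rcond=None)
    return coef

def surface_area(coef, x_range, y_range, n=200):
    """Approximate area of the fitted patch over a rectangular footprint
    using area = integral of sqrt(1 + zx^2 + zy^2) dx dy."""
    xs = np.linspace(*x_range, n)
    ys = np.linspace(*y_range, n)
    X, Y = np.meshgrid(xs, ys)
    _, b, c, d, e, f = coef
    zx = b + 2 * d * X + e * Y          # dz/dx
    zy = c + e * X + 2 * f * Y          # dz/dy
    dA = np.sqrt(1 + zx**2 + zy**2)
    return dA.mean() * (x_range[1] - x_range[0]) * (y_range[1] - y_range[0])

# toy noisy "leaf": a gently curved patch with measurement noise
rng = np.random.default_rng(0)
x = rng.uniform(-1, 1, 2000)
y = rng.uniform(-1, 1, 2000)
z = 0.3 * x**2 - 0.2 * y**2 + rng.normal(0, 0.02, x.size)
coef = fit_quadratic_surface(np.column_stack([x, y, z]))
print(round(surface_area(coef, (-1, 1), (-1, 1)), 3))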
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n EasyDCP: An affordable, high‐throughput tool to measure plant phenotypic traits in 3D.\n \n \n \n \n\n\n \n Feldman, A.; Wang, H.; Fukano, Y.; Kato, Y.; Ninomiya, S.; and Guo, W.\n\n\n \n\n\n\n Methods in Ecology and Evolution, 12(9): 1679–1686. September 2021.\n \n\n\n\n
\n\n\n\n \n \n \"EasyDCP:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{feldman_easydcp_2021,\n\ttitle = {{EasyDCP}: {An} affordable, high‐throughput tool to measure plant phenotypic traits in {3D}},\n\tvolume = {12},\n\tissn = {2041-210X, 2041-210X},\n\tshorttitle = {{EasyDCP}},\n\turl = {https://onlinelibrary.wiley.com/doi/10.1111/2041-210X.13645},\n\tdoi = {10.1111/2041-210X.13645},\n\tabstract = {High-throughput 3D phenotyping is a rapidly emerging field that has widespread application for measurement of individual plants. Despite this, high-throughput plant phenotyping is rarely used in ecological studies due to financial and logistical limitations.\nWe introduce EasyDCP, a Python package for 3D phenotyping, which uses photogrammetry to automatically reconstruct 3D point clouds of individuals within populations of container plants and output phenotypic trait data. Here we give instructions for the imaging setup and the required hardware, which is minimal and do-it-yourself, and introduce the functionality and workflow of EasyDCP.\nWe compared the performance of EasyDCP against a high-end commercial laser scanner for the acquisition of plant height and projected leaf area. Both tools had strong correlations with ground truth measurement, and plant height measurements were more accurate using EasyDCP (plant height: EasyDCP r2 = 0.96, Laser r2 = 0.86; projected leaf area: EasyDCP r2 = 0.96, Laser r2 = 0.96).\nEasyDCP is an open-source software tool to measure phenotypic traits of container plants with high-throughput and low labour and financial costs.},\n\tlanguage = {en},\n\tnumber = {9},\n\turldate = {2022-04-12},\n\tjournal = {Methods in Ecology and Evolution},\n\tauthor = {Feldman, Alexander and Wang, Haozhou and Fukano, Yuya and Kato, Yoichiro and Ninomiya, Seishi and Guo, Wei},\n\tmonth = sep,\n\tyear = {2021},\n\tpages = {1679--1686},\n}\n\n
\n
\n\n\n
\n High-throughput 3D phenotyping is a rapidly emerging field that has widespread application for measurement of individual plants. Despite this, high-throughput plant phenotyping is rarely used in ecological studies due to financial and logistical limitations. We introduce EasyDCP, a Python package for 3D phenotyping, which uses photogrammetry to automatically reconstruct 3D point clouds of individuals within populations of container plants and output phenotypic trait data. Here we give instructions for the imaging setup and the required hardware, which is minimal and do-it-yourself, and introduce the functionality and workflow of EasyDCP. We compared the performance of EasyDCP against a high-end commercial laser scanner for the acquisition of plant height and projected leaf area. Both tools had strong correlations with ground truth measurement, and plant height measurements were more accurate using EasyDCP (plant height: EasyDCP r2 = 0.96, Laser r2 = 0.86; projected leaf area: EasyDCP r2 = 0.96, Laser r2 = 0.96). EasyDCP is an open-source software tool to measure phenotypic traits of container plants with high-throughput and low labour and financial costs.\n
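Two of the traits mentioned above, plant height and projected leaf area, can be computed from an already-segmented plant point cloud with a few lines of NumPy/SciPy. This sketch does not use the EasyDCP API; the function names and the convex-hull area proxy are assumptions chosen for illustration.

import numpy as np
from scipy.spatial import ConvexHull

def plant_height(points, ground_z=None):
    """Height = top of canopy minus the ground level (min z if not supplied)."""
    z = points[:, 2]
    base = z.min() if ground_z is None else ground_z
    return float(z.max() - base)

def projected_leaf_area(points):
    """Area of the convex hull of the xy-projection of the canopy points."""
    hull = ConvexHull(points[:, :2])
    return float(hull.volume)  # for 2D input, .volume is the enclosed area

rng = np.random.default_rng(1)
cloud = rng.uniform([0, 0, 0], [0.4, 0.3, 0.25], size=(5000, 3))  # toy plant
print(round(plant_height(cloud), 3), round(projected_leaf_area(cloud), 3))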
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Easy domain adaptation method for filling the species gap in deep learning-based fruit detection.\n \n \n \n \n\n\n \n Zhang, W.; Chen, K.; Wang, J.; Shi, Y.; and Guo, W.\n\n\n \n\n\n\n Horticulture Research, 8(1): 119. December 2021.\n \n\n\n\n
\n\n\n\n \n \n \"EasyPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{zhang_easy_2021,\n\ttitle = {Easy domain adaptation method for filling the species gap in deep learning-based fruit detection},\n\tvolume = {8},\n\tissn = {2662-6810, 2052-7276},\n\turl = {https://academic.oup.com/hr/article/6446655},\n\tdoi = {10.1038/s41438-021-00553-8},\n\tabstract = {Abstract\n            Fruit detection and counting are essential tasks for horticulture research. With computer vision technology development, fruit detection techniques based on deep learning have been widely used in modern orchards. However, most deep learning-based fruit detection models are generated based on fully supervised approaches, which means a model trained with one domain species may not be transferred to another. There is always a need to recreate and label the relevant training dataset, but such a procedure is time-consuming and labor-intensive. This paper proposed a domain adaptation method that can transfer an existing model trained from one domain to a new domain without extra manual labeling. The method includes three main steps: transform the source fruit image (with labeled information) into the target fruit image (without labeled information) through the CycleGAN network; Automatically label the target fruit image by a pseudo-label process; Improve the labeling accuracy by a pseudo-label self-learning approach. Use a labeled orange image dataset as the source domain, unlabeled apple and tomato image dataset as the target domain, the performance of the proposed method from the perspective of fruit detection has been evaluated. Without manual labeling for target domain image, the mean average precision reached 87.5\\% for apple detection and 76.9\\% for tomato detection, which shows that the proposed method can potentially fill the species gap in deep learning-based fruit detection.},\n\tlanguage = {en},\n\tnumber = {1},\n\turldate = {2022-04-12},\n\tjournal = {Horticulture Research},\n\tauthor = {Zhang, Wenli and Chen, Kaizhen and Wang, Jiaqi and Shi, Yun and Guo, Wei},\n\tmonth = dec,\n\tyear = {2021},\n\tpages = {119},\n}\n\n
\n
\n\n\n
\n Fruit detection and counting are essential tasks for horticulture research. With computer vision technology development, fruit detection techniques based on deep learning have been widely used in modern orchards. However, most deep learning-based fruit detection models are generated based on fully supervised approaches, which means a model trained with one domain species may not be transferred to another. There is always a need to recreate and label the relevant training dataset, but such a procedure is time-consuming and labor-intensive. This paper proposed a domain adaptation method that can transfer an existing model trained from one domain to a new domain without extra manual labeling. The method includes three main steps: transform the source fruit image (with labeled information) into the target fruit image (without labeled information) through the CycleGAN network; automatically label the target fruit image by a pseudo-label process; improve the labeling accuracy by a pseudo-label self-learning approach. Using a labeled orange image dataset as the source domain and unlabeled apple and tomato image datasets as the target domain, the performance of the proposed method was evaluated from the perspective of fruit detection. Without manual labeling for the target domain images, the mean average precision reached 87.5% for apple detection and 76.9% for tomato detection, which shows that the proposed method can potentially fill the species gap in deep learning-based fruit detection.\n
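The pseudo-label and self-learning steps can be sketched as a small loop: detections from a source-trained model on unlabeled target images are kept only above a confidence threshold and fed back as training labels for the next round. The detect and retrain callables below are hypothetical stand-ins, not the paper's implementation, and the 0.8 threshold is an arbitrary assumption.

from typing import Callable, Dict, List, Tuple

Box = Tuple[float, float, float, float]

def pseudo_label_round(
    detect: Callable[[str], List[Tuple[Box, float]]],  # image -> (box, score) pairs
    target_images: List[str],
    conf_threshold: float = 0.8,
) -> Dict[str, List[Box]]:
    """Keep only confident detections as pseudo ground truth."""
    labels: Dict[str, List[Box]] = {}
    for img in target_images:
        kept = [box for box, score in detect(img) if score >= conf_threshold]
        if kept:
            labels[img] = kept
    return labels

def self_learning(detect, retrain, target_images, rounds=3, conf=0.8):
    """Alternate pseudo-labeling and retraining; return the final detector."""
    for _ in range(rounds):
        pseudo = pseudo_label_round(detect, target_images, conf)
        detect = retrain(pseudo)  # retrain on pseudo-labels, get a new detector
    return detect

# toy usage with dummy stand-ins
def dummy_detect(img):
    return [((0, 0, 10, 10), 0.9), ((5, 5, 8, 8), 0.4)]

def dummy_retrain(pseudo):
    return dummy_detect

print(pseudo_label_round(dummy_detect, ["a.jpg", "b.jpg"]))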
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n UAS-Based Plant Phenotyping for Research and Breeding Applications.\n \n \n \n \n\n\n \n Guo, W.; Carroll, M. E.; Singh, A.; Swetnam, T. L.; Merchant, N.; Sarkar, S.; Singh, A. K.; and Ganapathysubramanian, B.\n\n\n \n\n\n\n Plant Phenomics, 2021: 1–21. June 2021.\n \n\n\n\n
\n\n\n\n \n \n \"UAS-BasedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{guo_uas-based_2021,\n\ttitle = {{UAS}-{Based} {Plant} {Phenotyping} for {Research} and {Breeding} {Applications}},\n\tvolume = {2021},\n\tissn = {2643-6515},\n\turl = {https://spj.sciencemag.org/journals/plantphenomics/2021/9840192/},\n\tdoi = {10.34133/2021/9840192},\n\tabstract = {Unmanned aircraft system (UAS) is a particularly powerful tool for plant phenotyping, due to reasonable cost of procurement and deployment, ease and flexibility for control and operation, ability to reconfigure sensor payloads to diversify sensing, and the ability to seamlessly fit into a larger connected phenotyping network. These advantages have expanded the use of UAS-based plant phenotyping approach in research and breeding applications. This paper reviews the state of the art in the deployment, collection, curation, storage, and analysis of data from UAS-based phenotyping platforms. We discuss pressing technical challenges, identify future trends in UAS-based phenotyping that the plant research community should be aware of, and pinpoint key plant science and agronomic questions that can be resolved with the next generation of UAS-based imaging modalities and associated data analysis pipelines. This review provides a broad account of the state of the art in UAS-based phenotyping to reduce the barrier to entry to plant science practitioners interested in deploying this imaging modality for phenotyping in plant breeding and research areas.},\n\tlanguage = {en},\n\turldate = {2022-04-12},\n\tjournal = {Plant Phenomics},\n\tauthor = {Guo, Wei and Carroll, Matthew E. and Singh, Arti and Swetnam, Tyson L. and Merchant, Nirav and Sarkar, Soumik and Singh, Asheesh K. and Ganapathysubramanian, Baskar},\n\tmonth = jun,\n\tyear = {2021},\n\tpages = {1--21},\n}\n\n
\n
\n\n\n
\n Unmanned aircraft system (UAS) is a particularly powerful tool for plant phenotyping, due to reasonable cost of procurement and deployment, ease and flexibility for control and operation, ability to reconfigure sensor payloads to diversify sensing, and the ability to seamlessly fit into a larger connected phenotyping network. These advantages have expanded the use of UAS-based plant phenotyping approach in research and breeding applications. This paper reviews the state of the art in the deployment, collection, curation, storage, and analysis of data from UAS-based phenotyping platforms. We discuss pressing technical challenges, identify future trends in UAS-based phenotyping that the plant research community should be aware of, and pinpoint key plant science and agronomic questions that can be resolved with the next generation of UAS-based imaging modalities and associated data analysis pipelines. This review provides a broad account of the state of the art in UAS-based phenotyping to reduce the barrier to entry to plant science practitioners interested in deploying this imaging modality for phenotyping in plant breeding and research areas.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n GIS-Based Analysis for UAV-Supported Field Experiments Reveals Soybean Traits Associated With Rotational Benefit.\n \n \n \n \n\n\n \n Fukano, Y.; Guo, W.; Aoki, N.; Ootsuka, S.; Noshita, K.; Uchida, K.; Kato, Y.; Sasaki, K.; Kamikawa, S.; and Kubota, H.\n\n\n \n\n\n\n Frontiers in Plant Science, 12: 637694. May 2021.\n \n\n\n\n
\n\n\n\n \n \n \"GIS-BasedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{fukano_gis-based_2021,\n\ttitle = {{GIS}-{Based} {Analysis} for {UAV}-{Supported} {Field} {Experiments} {Reveals} {Soybean} {Traits} {Associated} {With} {Rotational} {Benefit}},\n\tvolume = {12},\n\tissn = {1664-462X},\n\turl = {https://www.frontiersin.org/articles/10.3389/fpls.2021.637694/full},\n\tdoi = {10.3389/fpls.2021.637694},\n\tabstract = {Recent advances in unmanned aerial vehicle (UAV) remote sensing and image analysis provide large amounts of plant canopy data, but there is no method to integrate the large imagery datasets with the much smaller manually collected datasets. A simple geographic information system (GIS)-based analysis for a UAV-supported field study (GAUSS) analytical framework was developed to integrate these datasets. It has three steps: developing a model for predicting sample values from UAV imagery, field gridding and trait value prediction, and statistical testing of predicted values. A field cultivation experiment was conducted to examine the effectiveness of the GAUSS framework, using a soybean–wheat crop rotation as the model system Fourteen soybean cultivars and subsequently a single wheat cultivar were grown in the same field. The crop rotation benefits of the soybeans for wheat yield were examined using GAUSS. Combining manually sampled data (\n              n\n              = 143) and pixel-based UAV imagery indices produced a large amount of high-spatial-resolution predicted wheat yields (\n              n\n              = 8,756). Significant differences were detected among soybean cultivars in their effects on wheat yield, and soybean plant traits were associated with the increases. This is the first reported study that links traits of legume plants with rotational benefits to the subsequent crop. Although some limitations and challenges remain, the GAUSS approach can be applied to many types of field-based plant experimentation, and has potential for extensive use in future studies.},\n\turldate = {2022-04-12},\n\tjournal = {Frontiers in Plant Science},\n\tauthor = {Fukano, Yuya and Guo, Wei and Aoki, Naohiro and Ootsuka, Shinjiro and Noshita, Koji and Uchida, Kei and Kato, Yoichiro and Sasaki, Kazuhiro and Kamikawa, Shotaka and Kubota, Hirofumi},\n\tmonth = may,\n\tyear = {2021},\n\tpages = {637694},\n}\n\n
\n
\n\n\n
\n Recent advances in unmanned aerial vehicle (UAV) remote sensing and image analysis provide large amounts of plant canopy data, but there is no method to integrate the large imagery datasets with the much smaller manually collected datasets. A simple geographic information system (GIS)-based analysis for a UAV-supported field study (GAUSS) analytical framework was developed to integrate these datasets. It has three steps: developing a model for predicting sample values from UAV imagery, field gridding and trait value prediction, and statistical testing of predicted values. A field cultivation experiment was conducted to examine the effectiveness of the GAUSS framework, using a soybean–wheat crop rotation as the model system. Fourteen soybean cultivars and subsequently a single wheat cultivar were grown in the same field. The crop rotation benefits of the soybeans for wheat yield were examined using GAUSS. Combining manually sampled data (n = 143) and pixel-based UAV imagery indices produced a large amount of high-spatial-resolution predicted wheat yields (n = 8,756). Significant differences were detected among soybean cultivars in their effects on wheat yield, and soybean plant traits were associated with the increases. This is the first reported study that links traits of legume plants with rotational benefits to the subsequent crop. Although some limitations and challenges remain, the GAUSS approach can be applied to many types of field-based plant experimentation, and has potential for extensive use in future studies.\n
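The three GAUSS steps can be mimicked on toy data: calibrate a simple model from a UAV-derived index to manually sampled yield, predict yield for every grid cell, and test for differences among groups. The linear model, the index values, and the 14 groups below are illustrative assumptions only, not the study's data.

import numpy as np
from scipy import stats

rng = np.random.default_rng(2)

# (1) calibration: manually sampled plots (UAV index value -> measured yield)
index_sampled = rng.uniform(0.2, 0.8, 143)
yield_sampled = 2.0 + 5.0 * index_sampled + rng.normal(0, 0.3, 143)
slope, intercept = np.polyfit(index_sampled, yield_sampled, 1)

# (2) prediction: index extracted for thousands of field grid cells
grid_index = rng.uniform(0.2, 0.8, 8756)
grid_yield = intercept + slope * grid_index

# (3) testing: compare predicted yield among (toy) preceding-cultivar groups
groups = np.array_split(grid_yield, 14)
f_stat, p_value = stats.f_oneway(*groups)
print(f"F={f_stat:.2f}, p={p_value:.3f}")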
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Efficient Maize Tassel-Detection Method using UAV based remote sensing.\n \n \n \n \n\n\n \n Kumar, A.; Desai, S. V.; Balasubramanian, V. N.; Rajalakshmi, P.; Guo, W.; Balaji Naik, B.; Balram, M.; and Desai, U. B.\n\n\n \n\n\n\n Remote Sensing Applications: Society and Environment, 23: 100549. August 2021.\n \n\n\n\n
\n\n\n\n \n \n \"EfficientPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{kumar_efficient_2021,\n\ttitle = {Efficient {Maize} {Tassel}-{Detection} {Method} using {UAV} based remote sensing},\n\tvolume = {23},\n\tissn = {23529385},\n\turl = {https://linkinghub.elsevier.com/retrieve/pii/S2352938521000859},\n\tdoi = {10.1016/j.rsase.2021.100549},\n\tabstract = {Regular monitoring is worthwhile to maintain a healthy crop. Historically, the manual observation was used to monitor crops, which is time-consuming and often costly. The recent boom in the development of Unmanned Aerial Vehicles (UAVs) has established a quick and easy way to monitor crops. UAVs can cover a wide area in a few minutes and obtain useful crop information with different sensors such as RGB, multispectral, hyperspectral cameras. Simultaneously, Convolutional Neural Networks (CNNs) have been effectively used for various vision-based agricultural monitoring activities, such as flower detection, fruit counting, and yield estimation. However, Convolutional Neural Network (CNN) requires a massive amount of labeled data for training, which is not always easy to obtain. Especially in agriculture, generating labeled datasets is time-consuming and exhaustive since interest objects are typically small in size and large in number. This paper proposes a novel method using k-means clustering with adaptive thresholding for detecting maize crop tassels to address these issues. The qualitative and quantitative analysis of the proposed method reveals that our method performs close to reference approaches and has an advantage over computational complexity. The proposed method detected and counted tassels with precision: 0.97438, recall: 0.88132, and F1 Score: 0.92412. In addition, using maize tassel detection from UAV images as the task in this paper, we propose a semi-automatic image annotation method to create labeled datasets of the maize crop easily. Based on the proposed method, the developed tool can be used in conjunction with a machine learning model to provide initial annotations for a given image, modified further by the user. Our tool's performance analysis reveals promising savings in annotation time, enabling the rapid production of maize crop labeled datasets.},\n\tlanguage = {en},\n\turldate = {2022-04-12},\n\tjournal = {Remote Sensing Applications: Society and Environment},\n\tauthor = {Kumar, Ajay and Desai, Sai Vikas and Balasubramanian, Vineeth N. and Rajalakshmi, P. and Guo, Wei and Balaji Naik, B. and Balram, M. and Desai, Uday B.},\n\tmonth = aug,\n\tyear = {2021},\n\tpages = {100549},\n}\n\n
\n
\n\n\n
\n Regular monitoring is worthwhile to maintain a healthy crop. Historically, the manual observation was used to monitor crops, which is time-consuming and often costly. The recent boom in the development of Unmanned Aerial Vehicles (UAVs) has established a quick and easy way to monitor crops. UAVs can cover a wide area in a few minutes and obtain useful crop information with different sensors such as RGB, multispectral, hyperspectral cameras. Simultaneously, Convolutional Neural Networks (CNNs) have been effectively used for various vision-based agricultural monitoring activities, such as flower detection, fruit counting, and yield estimation. However, Convolutional Neural Network (CNN) requires a massive amount of labeled data for training, which is not always easy to obtain. Especially in agriculture, generating labeled datasets is time-consuming and exhaustive since interest objects are typically small in size and large in number. This paper proposes a novel method using k-means clustering with adaptive thresholding for detecting maize crop tassels to address these issues. The qualitative and quantitative analysis of the proposed method reveals that our method performs close to reference approaches and has an advantage over computational complexity. The proposed method detected and counted tassels with precision: 0.97438, recall: 0.88132, and F1 Score: 0.92412. In addition, using maize tassel detection from UAV images as the task in this paper, we propose a semi-automatic image annotation method to create labeled datasets of the maize crop easily. Based on the proposed method, the developed tool can be used in conjunction with a machine learning model to provide initial annotations for a given image, modified further by the user. Our tool's performance analysis reveals promising savings in annotation time, enabling the rapid production of maize crop labeled datasets.\n
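A generic version of the colour-clustering idea (not the paper's exact pipeline) can be sketched with scikit-learn and SciPy: cluster pixel colours with k-means, take the brightest cluster as candidate tassel pixels, and count sufficiently large connected blobs. The choice of k, the brightness heuristic, and the blob-size threshold are assumptions.

import numpy as np
from sklearn.cluster import KMeans
from scipy import ndimage

def detect_tassels(rgb_image: np.ndarray, k: int = 3, min_blob_px: int = 30):
    """rgb_image: (H, W, 3) uint8 array. Returns (count, filtered label map)."""
    h, w, _ = rgb_image.shape
    pixels = rgb_image.reshape(-1, 3).astype(np.float32)
    km = KMeans(n_clusters=k, n_init=5, random_state=0).fit(pixels)
    brightest = np.argmax(km.cluster_centers_.mean(axis=1))  # tassels appear bright
    mask = (km.labels_ == brightest).reshape(h, w)
    labeled, n = ndimage.label(mask)
    sizes = ndimage.sum(mask, labeled, index=np.arange(1, n + 1))
    keep = np.flatnonzero(sizes >= min_blob_px) + 1  # drop tiny speckle blobs
    return len(keep), np.isin(labeled, keep) * labeled

img = np.random.default_rng(3).uniform(0, 255, (64, 64, 3)).astype(np.uint8)
count, _ = detect_tassels(img)
print(count)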
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n EasyIDP: A Python Package for Intermediate Data Processing in UAV-Based Plant Phenotyping.\n \n \n \n \n\n\n \n Wang, H.; Duan, Y.; Shi, Y.; Kato, Y.; Ninomiya, S.; and Guo, W.\n\n\n \n\n\n\n Remote Sensing, 13(13): 2622. July 2021.\n \n\n\n\n
\n\n\n\n \n \n \"EasyIDP:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{wang_easyidp_2021,\n\ttitle = {{EasyIDP}: {A} {Python} {Package} for {Intermediate} {Data} {Processing} in {UAV}-{Based} {Plant} {Phenotyping}},\n\tvolume = {13},\n\tissn = {2072-4292},\n\tshorttitle = {{EasyIDP}},\n\turl = {https://www.mdpi.com/2072-4292/13/13/2622},\n\tdoi = {10.3390/rs13132622},\n\tabstract = {Unmanned aerial vehicle (UAV) and structure from motion (SfM) photogrammetry techniques are widely used for field-based, high-throughput plant phenotyping nowadays, but some of the intermediate processes throughout the workflow remain manual. For example, geographic information system (GIS) software is used to manually assess the 2D/3D field reconstruction quality and cropping region of interests (ROIs) from the whole field. In addition, extracting phenotypic traits from raw UAV images is more competitive than directly from the digital orthomosaic (DOM). Currently, no easy-to-use tools are available to implement previous tasks for commonly used commercial SfM software, such as Pix4D and Agisoft Metashape. Hence, an open source software package called easy intermediate data processor (EasyIDP; MIT license) was developed to decrease the workload in intermediate data processing mentioned above. The functions of the proposed package include (1) an ROI cropping module, assisting in reconstruction quality assessment and cropping ROIs from the whole field, and (2) an ROI reversing module, projecting ROIs to relative raw images. The result showed that both cropping and reversing modules work as expected. Moreover, the effects of ROI height selection and reversed ROI position on raw images to reverse calculation were discussed. This tool shows great potential for decreasing workload in data annotation for machine learning applications.},\n\tlanguage = {en},\n\tnumber = {13},\n\turldate = {2022-04-12},\n\tjournal = {Remote Sensing},\n\tauthor = {Wang, Haozhou and Duan, Yulin and Shi, Yun and Kato, Yoichiro and Ninomiya, Seishi and Guo, Wei},\n\tmonth = jul,\n\tyear = {2021},\n\tpages = {2622},\n}\n\n
\n
\n\n\n
\n Unmanned aerial vehicle (UAV) and structure from motion (SfM) photogrammetry techniques are widely used for field-based, high-throughput plant phenotyping nowadays, but some of the intermediate processes throughout the workflow remain manual. For example, geographic information system (GIS) software is used to manually assess the 2D/3D field reconstruction quality and cropping region of interests (ROIs) from the whole field. In addition, extracting phenotypic traits from raw UAV images is more competitive than directly from the digital orthomosaic (DOM). Currently, no easy-to-use tools are available to implement previous tasks for commonly used commercial SfM software, such as Pix4D and Agisoft Metashape. Hence, an open source software package called easy intermediate data processor (EasyIDP; MIT license) was developed to decrease the workload in intermediate data processing mentioned above. The functions of the proposed package include (1) an ROI cropping module, assisting in reconstruction quality assessment and cropping ROIs from the whole field, and (2) an ROI reversing module, projecting ROIs to relative raw images. The result showed that both cropping and reversing modules work as expected. Moreover, the effects of ROI height selection and reversed ROI position on raw images to reverse calculation were discussed. This tool shows great potential for decreasing workload in data annotation for machine learning applications.\n
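The "ROI reversing" step boils down to projecting a georeferenced ROI corner into a raw image using the camera pose and intrinsics recovered by SfM. The sketch below is a plain pinhole-camera illustration with made-up camera parameters; it is not EasyIDP's API.

import numpy as np

def project_point(pt_world, R, t, fx, fy, cx, cy):
    """pt_world: (3,) world point; R: (3, 3) world-to-camera rotation;
    t: (3,) translation; fx, fy, cx, cy: intrinsics. Returns (u, v) pixels."""
    p_cam = R @ np.asarray(pt_world) + t
    x, y, z = p_cam
    return fx * x / z + cx, fy * y / z + cy

# toy camera looking straight down from 30 m above the field origin
R = np.eye(3)
t = np.array([0.0, 0.0, 30.0])
u, v = project_point([1.5, -2.0, 0.0], R, t, fx=3600, fy=3600, cx=2736, cy=1824)
print(round(u, 1), round(v, 1))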
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Estimates of Maize Plant Density from UAV RGB Images Using Faster-RCNN Detection Model: Impact of the Spatial Resolution.\n \n \n \n \n\n\n \n Velumani, K.; Lopez-Lozano, R.; Madec, S.; Guo, W.; Gillet, J.; Comar, A.; and Baret, F.\n\n\n \n\n\n\n Plant Phenomics, 2021: 1–16. August 2021.\n \n\n\n\n
\n\n\n\n \n \n \"EstimatesPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{velumani_estimates_2021,\n\ttitle = {Estimates of {Maize} {Plant} {Density} from {UAV} {RGB} {Images} {Using} {Faster}-{RCNN} {Detection} {Model}: {Impact} of the {Spatial} {Resolution}},\n\tvolume = {2021},\n\tissn = {2643-6515},\n\tshorttitle = {Estimates of {Maize} {Plant} {Density} from {UAV} {RGB} {Images} {Using} {Faster}-{RCNN} {Detection} {Model}},\n\turl = {https://spj.sciencemag.org/journals/plantphenomics/2021/9824843/},\n\tdoi = {10.34133/2021/9824843},\n\tabstract = {Early-stage plant density is an essential trait that determines the fate of a genotype under given environmental conditions and management practices. The use of RGB images taken from UAVs may replace the traditional visual counting in fields with improved throughput, accuracy, and access to plant localization. However, high-resolution images are required to detect the small plants present at the early stages. This study explores the impact of image ground sampling distance (GSD) on the performances of maize plant detection at three-to-five leaves stage using Faster-RCNN object detection algorithm. Data collected at high resolution (\n              \n                GSD\n                ≈\n                0.3\n                 \n                cm\n              \n              ) over six contrasted sites were used for model training. Two additional sites with images acquired both at high and low (\n              \n                GSD\n                ≈\n                0.6\n                 \n                cm\n              \n              ) resolutions were used to evaluate the model performances. Results show that Faster-RCNN achieved very good plant detection and counting (\n              \n                rRMSE\n                =\n                0.08\n              \n              ) performances when native high-resolution images are used both for training and validation. Similarly, good performances were observed (\n              \n                rRMSE\n                =\n                0.11\n              \n              ) when the model is trained over synthetic low-resolution images obtained by downsampling the native training high-resolution images and applied to the synthetic low-resolution validation images. Conversely, poor performances are obtained when the model is trained on a given spatial resolution and applied to another spatial resolution. Training on a mix of high- and low-resolution images allows to get very good performances on the native high-resolution (\n              \n                rRMSE\n                =\n                0.06\n              \n              ) and synthetic low-resolution (\n              \n                rRMSE\n                =\n                0.10\n              \n              ) images. However, very low performances are still observed over the native low-resolution images (\n              \n                rRMSE\n                =\n                0.48\n              \n              ), mainly due to the poor quality of the native low-resolution images. Finally, an advanced super resolution method based on GAN (generative adversarial network) that introduces additional textural information derived from the native high-resolution images was applied to the native low-resolution validation images. 
Results show some significant improvement (\n              \n                rRMSE\n                =\n                0.22\n              \n              ) compared to bicubic upsampling approach, while still far below the performances achieved over the native high-resolution images.},\n\tlanguage = {en},\n\turldate = {2022-04-12},\n\tjournal = {Plant Phenomics},\n\tauthor = {Velumani, K. and Lopez-Lozano, R. and Madec, S. and Guo, W. and Gillet, J. and Comar, A. and Baret, F.},\n\tmonth = aug,\n\tyear = {2021},\n\tpages = {1--16},\n}\n\n
\n
\n\n\n
\n Early-stage plant density is an essential trait that determines the fate of a genotype under given environmental conditions and management practices. The use of RGB images taken from UAVs may replace the traditional visual counting in fields with improved throughput, accuracy, and access to plant localization. However, high-resolution images are required to detect the small plants present at the early stages. This study explores the impact of image ground sampling distance (GSD) on the performances of maize plant detection at three-to-five leaves stage using Faster-RCNN object detection algorithm. Data collected at high resolution (GSD ≈ 0.3 cm) over six contrasted sites were used for model training. Two additional sites with images acquired both at high and low (GSD ≈ 0.6 cm) resolutions were used to evaluate the model performances. Results show that Faster-RCNN achieved very good plant detection and counting (rRMSE = 0.08) performances when native high-resolution images are used both for training and validation. Similarly, good performances were observed (rRMSE = 0.11) when the model is trained over synthetic low-resolution images obtained by downsampling the native training high-resolution images and applied to the synthetic low-resolution validation images. Conversely, poor performances are obtained when the model is trained on a given spatial resolution and applied to another spatial resolution. Training on a mix of high- and low-resolution images allows to get very good performances on the native high-resolution (rRMSE = 0.06) and synthetic low-resolution (rRMSE = 0.10) images. However, very low performances are still observed over the native low-resolution images (rRMSE = 0.48), mainly due to the poor quality of the native low-resolution images. Finally, an advanced super resolution method based on GAN (generative adversarial network) that introduces additional textural information derived from the native high-resolution images was applied to the native low-resolution validation images. Results show some significant improvement (rRMSE = 0.22) compared to bicubic upsampling approach, while still far below the performances achieved over the native high-resolution images.\n
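The synthetic low-resolution training data described above can be produced by downsampling the native images and rescaling their box annotations by the same factor. The block-averaging scheme and array shapes below are assumptions for illustration, not the study's preprocessing code.

import numpy as np

def downsample_with_boxes(image: np.ndarray, boxes, factor: int = 2):
    """image: (H, W, 3) array with H and W divisible by factor.
    boxes: list of (xmin, ymin, xmax, ymax) in native pixels.
    Returns a block-averaged image and boxes rescaled to the new pixel grid."""
    h, w, c = image.shape
    small = image.reshape(h // factor, factor, w // factor, factor, c).mean(axis=(1, 3))
    scaled = [tuple(v / factor for v in box) for box in boxes]
    return small, scaled

img = np.zeros((600, 800, 3), dtype=np.float32)
small, boxes = downsample_with_boxes(img, [(100, 200, 160, 260)])
print(small.shape, boxes)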
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An Efficient Method for Estimating Wheat Heading Dates Using UAV Images.\n \n \n \n \n\n\n \n Zhao, L.; Guo, W.; Wang, J.; Wang, H.; Duan, Y.; Wang, C.; Wu, W.; and Shi, Y.\n\n\n \n\n\n\n Remote Sensing, 13(16): 3067. August 2021.\n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{zhao_efficient_2021,\n\ttitle = {An {Efficient} {Method} for {Estimating} {Wheat} {Heading} {Dates} {Using} {UAV} {Images}},\n\tvolume = {13},\n\tissn = {2072-4292},\n\turl = {https://www.mdpi.com/2072-4292/13/16/3067},\n\tdoi = {10.3390/rs13163067},\n\tabstract = {Convenient, efficient, and high-throughput estimation of wheat heading dates is of great significance in plant sciences and agricultural research. However, documenting heading dates is time-consuming, labor-intensive, and subjective on a large-scale field. To overcome these challenges, model- and image-based approaches are used to estimate heading dates. Phenology models usually require complicated parameters calibrations, making it difficult to model other varieties and different locations, while in situ field-image recognition usually requires the deployment of a large amount of observational equipment, which is expensive. Therefore, in this study, we proposed a growth curve-based method for estimating wheat heading dates. The method first generates a height-based continuous growth curve based on five time-series unmanned aerial vehicle (UAV) images captured over the entire wheat growth cycle ({\\textgreater}200 d). Then estimate the heading date by generated growth curve. As a result, the proposed method had a mean absolute error of 2.81 d and a root mean square error of 3.49 d for 72 wheat plots composed of different varieties and densities sown on different dates. Thus, the proposed method is straightforward, efficient, and affordable and meets the high-throughput estimation requirements of large-scale fields and underdeveloped areas.},\n\tlanguage = {en},\n\tnumber = {16},\n\turldate = {2022-04-12},\n\tjournal = {Remote Sensing},\n\tauthor = {Zhao, Licheng and Guo, Wei and Wang, Jian and Wang, Haozhou and Duan, Yulin and Wang, Cong and Wu, Wenbin and Shi, Yun},\n\tmonth = aug,\n\tyear = {2021},\n\tpages = {3067},\n}\n\n
\n
\n\n\n
\n Convenient, efficient, and high-throughput estimation of wheat heading dates is of great significance in plant sciences and agricultural research. However, documenting heading dates is time-consuming, labor-intensive, and subjective on a large-scale field. To overcome these challenges, model- and image-based approaches are used to estimate heading dates. Phenology models usually require complicated parameter calibrations, making it difficult to model other varieties and different locations, while in situ field-image recognition usually requires the deployment of a large amount of observational equipment, which is expensive. Therefore, in this study, we proposed a growth curve-based method for estimating wheat heading dates. The method first generates a height-based continuous growth curve based on five time-series unmanned aerial vehicle (UAV) images captured over the entire wheat growth cycle (>200 d). The heading date is then estimated from the generated growth curve. As a result, the proposed method had a mean absolute error of 2.81 d and a root mean square error of 3.49 d for 72 wheat plots composed of different varieties and densities sown on different dates. Thus, the proposed method is straightforward, efficient, and affordable and meets the high-throughput estimation requirements of large-scale fields and underdeveloped areas.\n
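A minimal sketch of the growth-curve idea, assuming a logistic model fitted to a handful of UAV-derived canopy heights: the inflection date of the fitted curve is reported as an illustrative proxy for heading-related timing. The paper's actual extraction rule may differ, and the toy heights below are invented.

import numpy as np
from scipy.optimize import curve_fit

def logistic(t, K, r, t0):
    """Logistic height-growth curve: asymptote K, rate r, inflection day t0."""
    return K / (1.0 + np.exp(-r * (t - t0)))

days = np.array([30.0, 90.0, 150.0, 180.0, 210.0])    # days after sowing
height = np.array([0.05, 0.20, 0.55, 0.80, 0.85])     # canopy height (m)

params, _ = curve_fit(logistic, days, height, p0=[1.0, 0.05, 120.0], maxfev=10000)
K, r, t0 = params
print(f"max-growth (inflection) day: {t0:.1f}, asymptotic height: {K:.2f} m")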
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Global Wheat Head Detection 2021: An Improved Dataset for Benchmarking Wheat Head Detection Methods.\n \n \n \n \n\n\n \n David, E.; Serouart, M.; Smith, D.; Madec, S.; Velumani, K.; Liu, S.; Wang, X.; Pinto, F.; Shafiee, S.; Tahir, I. S. A.; Tsujimoto, H.; Nasuda, S.; Zheng, B.; Kirchgessner, N.; Aasen, H.; Hund, A.; Sadhegi-Tehran, P.; Nagasawa, K.; Ishikawa, G.; Dandrifosse, S.; Carlier, A.; Dumont, B.; Mercatoris, B.; Evers, B.; Kuroki, K.; Wang, H.; Ishii, M.; Badhon, M. A.; Pozniak, C.; LeBauer, D. S.; Lillemo, M.; Poland, J.; Chapman, S.; de Solan, B.; Baret, F.; Stavness, I.; and Guo, W.\n\n\n \n\n\n\n Plant Phenomics, 2021: 1–9. September 2021.\n \n\n\n\n
\n\n\n\n \n \n \"GlobalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{david_global_2021,\n\ttitle = {Global {Wheat} {Head} {Detection} 2021: {An} {Improved} {Dataset} for {Benchmarking} {Wheat} {Head} {Detection} {Methods}},\n\tvolume = {2021},\n\tissn = {2643-6515},\n\tshorttitle = {Global {Wheat} {Head} {Detection} 2021},\n\turl = {https://spj.sciencemag.org/journals/plantphenomics/2021/9846158/},\n\tdoi = {10.34133/2021/9846158},\n\tabstract = {The Global Wheat Head Detection (GWHD) dataset was created in 2020 and has assembled 193,634 labelled wheat heads from 4700 RGB images acquired from various acquisition platforms and 7 countries/institutions. With an associated competition hosted in Kaggle, GWHD\\_2020 has successfully attracted attention from both the computer vision and agricultural science communities. From this first experience, a few avenues for improvements have been identified regarding data size, head diversity, and label reliability. To address these issues, the 2020 dataset has been reexamined, relabeled, and complemented by adding 1722 images from 5 additional countries, allowing for 81,553 additional wheat heads. We now release in 2021 a new version of the Global Wheat Head Detection dataset, which is bigger, more diverse, and less noisy than the GWHD\\_2020 version.},\n\tlanguage = {en},\n\turldate = {2022-04-12},\n\tjournal = {Plant Phenomics},\n\tauthor = {David, Etienne and Serouart, Mario and Smith, Daniel and Madec, Simon and Velumani, Kaaviya and Liu, Shouyang and Wang, Xu and Pinto, Francisco and Shafiee, Shahameh and Tahir, Izzat S. A. and Tsujimoto, Hisashi and Nasuda, Shuhei and Zheng, Bangyou and Kirchgessner, Norbert and Aasen, Helge and Hund, Andreas and Sadhegi-Tehran, Pouria and Nagasawa, Koichi and Ishikawa, Goro and Dandrifosse, Sébastien and Carlier, Alexis and Dumont, Benjamin and Mercatoris, Benoit and Evers, Byron and Kuroki, Ken and Wang, Haozhou and Ishii, Masanori and Badhon, Minhajul A. and Pozniak, Curtis and LeBauer, David Shaner and Lillemo, Morten and Poland, Jesse and Chapman, Scott and de Solan, Benoit and Baret, Frédéric and Stavness, Ian and Guo, Wei},\n\tmonth = sep,\n\tyear = {2021},\n\tpages = {1--9},\n}\n\n
\n
\n\n\n
\n The Global Wheat Head Detection (GWHD) dataset was created in 2020 and has assembled 193,634 labelled wheat heads from 4700 RGB images acquired from various acquisition platforms and 7 countries/institutions. With an associated competition hosted in Kaggle, GWHD_2020 has successfully attracted attention from both the computer vision and agricultural science communities. From this first experience, a few avenues for improvements have been identified regarding data size, head diversity, and label reliability. To address these issues, the 2020 dataset has been reexamined, relabeled, and complemented by adding 1722 images from 5 additional countries, allowing for 81,553 additional wheat heads. We now release in 2021 a new version of the Global Wheat Head Detection dataset, which is bigger, more diverse, and less noisy than the GWHD_2020 version.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Lightweight Fruit-Detection Algorithm for Edge Computing Applications.\n \n \n \n \n\n\n \n Zhang, W.; Liu, Y.; Chen, K.; Li, H.; Duan, Y.; Wu, W.; Shi, Y.; and Guo, W.\n\n\n \n\n\n\n Frontiers in Plant Science, 12: 740936. October 2021.\n \n\n\n\n
\n\n\n\n \n \n \"LightweightPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{zhang_lightweight_2021,\n\ttitle = {Lightweight {Fruit}-{Detection} {Algorithm} for {Edge} {Computing} {Applications}},\n\tvolume = {12},\n\tissn = {1664-462X},\n\turl = {https://www.frontiersin.org/articles/10.3389/fpls.2021.740936/full},\n\tdoi = {10.3389/fpls.2021.740936},\n\tabstract = {In recent years, deep-learning-based fruit-detection technology has exhibited excellent performance in modern horticulture research. However, deploying deep learning algorithms in real-time field applications is still challenging, owing to the relatively low image processing capability of edge devices. Such limitations are becoming a new bottleneck and hindering the utilization of AI algorithms in modern horticulture. In this paper, we propose a lightweight fruit-detection algorithm, specifically designed for edge devices. The algorithm is based on Light-CSPNet as the backbone network, an improved feature-extraction module, a down-sampling method, and a feature-fusion module, and it ensures real-time detection on edge devices while maintaining the fruit-detection accuracy. The proposed algorithm was tested on three edge devices: NVIDIA Jetson Xavier NX, NVIDIA Jetson TX2, and NVIDIA Jetson NANO. The experimental results show that the average detection precision of the proposed algorithm for orange, tomato, and apple datasets are 0.93, 0.847, and 0.850, respectively. Deploying the algorithm, the detection speed of NVIDIA Jetson Xavier NX reaches 21.3, 24.8, and 22.2 FPS, while that of NVIDIA Jetson TX2 reaches 13.9, 14.1, and 14.5 FPS and that of NVIDIA Jetson NANO reaches 6.3, 5.0, and 8.5 FPS for the three datasets. Additionally, the proposed algorithm provides a component add/remove function to flexibly adjust the model structure, considering the trade-off between the detection accuracy and speed in practical usage.},\n\turldate = {2022-04-12},\n\tjournal = {Frontiers in Plant Science},\n\tauthor = {Zhang, Wenli and Liu, Yuxin and Chen, Kaizhen and Li, Huibin and Duan, Yulin and Wu, Wenbin and Shi, Yun and Guo, Wei},\n\tmonth = oct,\n\tyear = {2021},\n\tpages = {740936},\n}\n\n
\n
\n\n\n
\n In recent years, deep-learning-based fruit-detection technology has exhibited excellent performance in modern horticulture research. However, deploying deep learning algorithms in real-time field applications is still challenging, owing to the relatively low image processing capability of edge devices. Such limitations are becoming a new bottleneck and hindering the utilization of AI algorithms in modern horticulture. In this paper, we propose a lightweight fruit-detection algorithm, specifically designed for edge devices. The algorithm is based on Light-CSPNet as the backbone network, an improved feature-extraction module, a down-sampling method, and a feature-fusion module, and it ensures real-time detection on edge devices while maintaining the fruit-detection accuracy. The proposed algorithm was tested on three edge devices: NVIDIA Jetson Xavier NX, NVIDIA Jetson TX2, and NVIDIA Jetson NANO. The experimental results show that the average detection precision of the proposed algorithm for orange, tomato, and apple datasets are 0.93, 0.847, and 0.850, respectively. Deploying the algorithm, the detection speed of NVIDIA Jetson Xavier NX reaches 21.3, 24.8, and 22.2 FPS, while that of NVIDIA Jetson TX2 reaches 13.9, 14.1, and 14.5 FPS and that of NVIDIA Jetson NANO reaches 6.3, 5.0, and 8.5 FPS for the three datasets. Additionally, the proposed algorithm provides a component add/remove function to flexibly adjust the model structure, considering the trade-off between the detection accuracy and speed in practical usage.\n
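Lightweight backbones of this kind typically rely on cheap building blocks such as depthwise-separable convolutions to cut parameters and FLOPs on edge devices. The block below is a generic PyTorch illustration of that trade-off, not Light-CSPNet or any module from the paper.

import torch
import torch.nn as nn

class DepthwiseSeparableConv(nn.Module):
    """3x3 depthwise convolution followed by a 1x1 pointwise convolution."""
    def __init__(self, in_ch: int, out_ch: int, stride: int = 1):
        super().__init__()
        self.depthwise = nn.Conv2d(in_ch, in_ch, kernel_size=3, stride=stride,
                                   padding=1, groups=in_ch, bias=False)
        self.pointwise = nn.Conv2d(in_ch, out_ch, kernel_size=1, bias=False)
        self.bn = nn.BatchNorm2d(out_ch)
        self.act = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.act(self.bn(self.pointwise(self.depthwise(x))))

block = DepthwiseSeparableConv(32, 64, stride=2)
x = torch.randn(1, 32, 128, 128)
print(block(x).shape)  # torch.Size([1, 64, 64, 64])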
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Active learning with point supervision for cost-effective panicle detection in cereal crops.\n \n \n \n \n\n\n \n Chandra, A. L.; Desai, S. V.; Balasubramanian, V. N.; Ninomiya, S.; and Guo, W.\n\n\n \n\n\n\n Plant Methods, 16(1): 34. December 2020.\n \n\n\n\n
\n\n\n\n \n \n \"ActivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{chandra_active_2020,\n\ttitle = {Active learning with point supervision for cost-effective panicle detection in cereal crops},\n\tvolume = {16},\n\tissn = {1746-4811},\n\turl = {https://plantmethods.biomedcentral.com/articles/10.1186/s13007-020-00575-8},\n\tdoi = {10.1186/s13007-020-00575-8},\n\tabstract = {Abstract\n            \n              Background\n              Panicle density of cereal crops such as wheat and sorghum is one of the main components for plant breeders and agronomists in understanding the yield of their crops. To phenotype the panicle density effectively, researchers agree there is a significant need for computer vision-based object detection techniques. Especially in recent times, research in deep learning-based object detection shows promising results in various agricultural studies. However, training such systems usually requires a lot of bounding-box labeled data. Since crops vary by both environmental and genetic conditions, acquisition of huge amount of labeled image datasets for each crop is expensive and time-consuming. Thus, to catalyze the widespread usage of automatic object detection for crop phenotyping, a cost-effective method to develop such automated systems is essential.\n            \n            \n              Results\n              We propose a point supervision based active learning approach for panicle detection in cereal crops. In our approach, the model constantly interacts with a human annotator by iteratively querying the labels for only the most informative images, as opposed to all images in a dataset. Our query method is specifically designed for cereal crops which usually tend to have panicles with low variance in appearance. Our method reduces labeling costs by intelligently leveraging low-cost weak labels (object centers) for picking the most informative images for which strong labels (bounding boxes) are required. We show promising results on two publicly available cereal crop datasets—Sorghum and Wheat. On Sorghum, 6 variants of our proposed method outperform the best baseline method with more than 55\\% savings in labeling time. Similarly, on Wheat, 3 variants of our proposed methods outperform the best baseline method with more than 50\\% of savings in labeling time.\n            \n            \n              Conclusion\n              We proposed a cost effective method to train reliable panicle detectors for cereal crops. A low cost panicle detection method for cereal crops is highly beneficial to both breeders and agronomists. Plant breeders can obtain quick crop yield estimates to make important crop management decisions. Similarly, obtaining real time visual crop analysis is valuable for researchers to analyze the crop’s response to various experimental conditions.},\n\tlanguage = {en},\n\tnumber = {1},\n\turldate = {2022-04-12},\n\tjournal = {Plant Methods},\n\tauthor = {Chandra, Akshay L. and Desai, Sai Vikas and Balasubramanian, Vineeth N. and Ninomiya, Seishi and Guo, Wei},\n\tmonth = dec,\n\tyear = {2020},\n\tpages = {34},\n}\n\n
\n
\n\n\n
\n Background: Panicle density of cereal crops such as wheat and sorghum is one of the main components for plant breeders and agronomists in understanding the yield of their crops. To phenotype the panicle density effectively, researchers agree there is a significant need for computer vision-based object detection techniques. Especially in recent times, research in deep learning-based object detection shows promising results in various agricultural studies. However, training such systems usually requires a lot of bounding-box labeled data. Since crops vary by both environmental and genetic conditions, acquisition of huge amount of labeled image datasets for each crop is expensive and time-consuming. Thus, to catalyze the widespread usage of automatic object detection for crop phenotyping, a cost-effective method to develop such automated systems is essential. Results: We propose a point supervision based active learning approach for panicle detection in cereal crops. In our approach, the model constantly interacts with a human annotator by iteratively querying the labels for only the most informative images, as opposed to all images in a dataset. Our query method is specifically designed for cereal crops which usually tend to have panicles with low variance in appearance. Our method reduces labeling costs by intelligently leveraging low-cost weak labels (object centers) for picking the most informative images for which strong labels (bounding boxes) are required. We show promising results on two publicly available cereal crop datasets (Sorghum and Wheat). On Sorghum, 6 variants of our proposed method outperform the best baseline method with more than 55% savings in labeling time. Similarly, on Wheat, 3 variants of our proposed methods outperform the best baseline method with more than 50% of savings in labeling time. Conclusion: We proposed a cost effective method to train reliable panicle detectors for cereal crops. A low cost panicle detection method for cereal crops is highly beneficial to both breeders and agronomists. Plant breeders can obtain quick crop yield estimates to make important crop management decisions. Similarly, obtaining real time visual crop analysis is valuable for researchers to analyze the crop’s response to various experimental conditions.\n
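The querying idea can be sketched as follows: cheap point labels (object centres) give a per-image count, and the images where the current detector's count disagrees most with that count are sent for expensive bounding-box annotation. The disagreement score and the callables are illustrative stand-ins, not the paper's acquisition function.

from typing import Callable, Dict, List

def select_for_strong_labels(
    predict_count: Callable[[str], int],   # detector's estimated object count
    point_counts: Dict[str, int],          # weak labels: centre points per image
    budget: int,
) -> List[str]:
    """Return the images whose predicted count disagrees most with the
    weak point count, up to the annotation budget."""
    scores = {img: abs(predict_count(img) - n) for img, n in point_counts.items()}
    ranked = sorted(scores, key=scores.get, reverse=True)
    return ranked[:budget]

# toy usage with a dummy detector that always predicts 5 objects
def dummy_count(img):
    return 5

weak = {"plot_01.jpg": 5, "plot_02.jpg": 12, "plot_03.jpg": 7}
print(select_for_strong_labels(dummy_count, weak, budget=1))  # ['plot_02.jpg']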
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Computer Vision with Deep Learning for Plant Phenotyping in Agriculture: A Survey.\n \n \n \n \n\n\n \n Balasubramanian, V. N; Guo, W.; Chandra, A. L; and Desai, S. V.\n\n\n \n\n\n\n Advanced Computing and Communications. March 2020.\n \n\n\n\n
\n\n\n\n \n \n \"ComputerPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{balasubramanian_computer_2020,\n\ttitle = {Computer {Vision} with {Deep} {Learning} for {Plant} {Phenotyping} in {Agriculture}: {A} {Survey}},\n\tshorttitle = {Computer {Vision} with {Deep} {Learning} for {Plant} {Phenotyping} in {Agriculture}},\n\turl = {https://journal.accsindia.org/computer-vision-with-deep-learning-for-plant-phenotyping-in-agriculture-a-survey/},\n\tdoi = {10.34048/ACC.2020.1.F1},\n\tabstract = {In light of growing challenges in agriculture with ever growing food demand across the world, efficient crop management techniques are necessary to increase crop yield. Precision agriculture techniques allow the stakeholders to make effective and customized crop management decisions based on data gathered from monitoring crop environments. Plant phenotyping techniques play a major role in accurate crop monitoring. Advancements in deep learning have made previously difficult phenotyping tasks possible. This survey aims to introduce the reader to the state of the art research in deep plant phenotyping.},\n\tlanguage = {en},\n\turldate = {2022-04-12},\n\tjournal = {Advanced Computing and Communications},\n\tauthor = {Balasubramanian, Vineeth N and Guo, Wei and Chandra, Akshay L and Desai, Sai Vikas},\n\tmonth = mar,\n\tyear = {2020},\n}\n\n
\n
\n\n\n
\n In light of growing challenges in agriculture with ever growing food demand across the world, efficient crop management techniques are necessary to increase crop yield. Precision agriculture techniques allow the stakeholders to make effective and customized crop management decisions based on data gathered from monitoring crop environments. Plant phenotyping techniques play a major role in accurate crop monitoring. Advancements in deep learning have made previously difficult phenotyping tasks possible. This survey aims to introduce the reader to the state of the art research in deep plant phenotyping.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Intact Detection of Highly Occluded Immature Tomatoes on Plants Using Deep Learning Techniques.\n \n \n \n \n\n\n \n Mu, Y.; Chen, T.; Ninomiya, S.; and Guo, W.\n\n\n \n\n\n\n Sensors, 20(10): 2984. May 2020.\n \n\n\n\n
\n\n\n\n \n \n \"IntactPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{mu_intact_2020,\n\ttitle = {Intact {Detection} of {Highly} {Occluded} {Immature} {Tomatoes} on {Plants} {Using} {Deep} {Learning} {Techniques}},\n\tvolume = {20},\n\tissn = {1424-8220},\n\turl = {https://www.mdpi.com/1424-8220/20/10/2984},\n\tdoi = {10.3390/s20102984},\n\tabstract = {Automatic detection of intact tomatoes on plants is highly expected for low-cost and optimal management in tomato farming. Mature tomato detection has been wildly studied, while immature tomato detection, especially when occluded with leaves, is difficult to perform using traditional image analysis, which is more important for long-term yield prediction. Therefore, tomato detection that can generalize well in real tomato cultivation scenes and is robust to issues such as fruit occlusion and variable lighting conditions is highly desired. In this study, we build a tomato detection model to automatically detect intact green tomatoes regardless of occlusions or fruit growth stage using deep learning approaches. The tomato detection model used faster region-based convolutional neural network (R-CNN) with Resnet-101 and transfer learned from the Common Objects in Context (COCO) dataset. The detection on test dataset achieved high average precision of 87.83\\% (intersection over union ≥ 0.5) and showed a high accuracy of tomato counting (R2 = 0.87). In addition, all the detected boxes were merged into one image to compile the tomato location map and estimate their size along one row in the greenhouse. By tomato detection, counting, location and size estimation, this method shows great potential for ripeness and yield prediction.},\n\tlanguage = {en},\n\tnumber = {10},\n\turldate = {2022-04-12},\n\tjournal = {Sensors},\n\tauthor = {Mu, Yue and Chen, Tai-Shen and Ninomiya, Seishi and Guo, Wei},\n\tmonth = may,\n\tyear = {2020},\n\tpages = {2984},\n}\n\n
Automatic detection of intact tomatoes on plants is highly desirable for low-cost and optimal management in tomato farming. Mature tomato detection has been widely studied, while immature tomato detection, which matters more for long-term yield prediction, is difficult to perform with traditional image analysis, especially when fruits are occluded by leaves. Therefore, tomato detection that generalizes well to real tomato cultivation scenes and is robust to issues such as fruit occlusion and variable lighting conditions is highly desired. In this study, we built a tomato detection model to automatically detect intact green tomatoes regardless of occlusion or fruit growth stage using deep learning approaches. The model used a Faster Region-based Convolutional Neural Network (R-CNN) with a ResNet-101 backbone, transfer-learned from the Common Objects in Context (COCO) dataset. Detection on the test dataset achieved a high average precision of 87.83% (intersection over union ≥ 0.5) and high accuracy in tomato counting (R2 = 0.87). In addition, all detected boxes were merged into one image to compile a tomato location map and estimate fruit size along one row in the greenhouse. Through tomato detection, counting, location and size estimation, this method shows great potential for ripeness and yield prediction.
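The detection-and-counting pipeline summarized above can be sketched with off-the-shelf tools. The following is a minimal illustration only, not the authors' code: it uses torchvision's COCO-pretrained Faster R-CNN with a ResNet-50-FPN backbone as a stand-in for the ResNet-101 model described in the paper, and the image path and score threshold are hypothetical.

import torch
import torchvision
from PIL import Image
from torchvision.transforms.functional import to_tensor

# Stand-in model: COCO-pretrained Faster R-CNN with a ResNet-50-FPN backbone
# (the paper fine-tuned a ResNet-101 variant on labelled tomato images).
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(weights="DEFAULT")
model.eval()

def count_detections(image_path, score_threshold=0.5):
    """Return the number of boxes above the score threshold, plus the boxes."""
    image = to_tensor(Image.open(image_path).convert("RGB"))
    with torch.no_grad():
        pred = model([image])[0]            # dict with 'boxes', 'labels', 'scores'
    keep = pred["scores"] >= score_threshold
    return int(keep.sum()), pred["boxes"][keep]

# n_fruits, boxes = count_detections("greenhouse_row_01.jpg")  # hypothetical image

In practice the stock COCO weights would be replaced by a model fine-tuned on labelled tomato images before the counts are meaningful.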
Contemporary adaptive divergence of plant competitive traits in urban and rural populations and its implication for weed management. Fukano, Y.; Guo, W.; Uchida, K.; and Tachiki, Y. Journal of Ecology, 108(6): 2521–2530. November 2020.
@article{fukano_contemporary_2020,\n\ttitle = {Contemporary adaptive divergence of plant competitive traits in urban and rural populations and its implication for weed management},\n\tvolume = {108},\n\tissn = {0022-0477, 1365-2745},\n\turl = {https://onlinelibrary.wiley.com/doi/10.1111/1365-2745.13472},\n\tdoi = {10.1111/1365-2745.13472},\n\tabstract = {Competition among neighbouring plants plays essential roles in growth, reproduction, population dynamics, and community assembly, but how competition drives local adaptation and the traits underlying the adaptation remain unclear. Here, we focused on populations of the aggressive weed Digitaria ciliaris from urban and rural habitats as low- and high-competition environments for light resources and examined how competitive interaction drove contemporary adaptive divergence of competitive traits.\nTo examine local adaptation to different competitive environments in D. ciliaris and the specific traits that have been selected for, we compared growth rate and competitive traits in plants from farmland and urban populations between high- and low-competition treatments. Furthermore, we conducted a field tillage experiment with drone (unmanned aerial vehicle) monitoring to examine the possibility that trait evolution in farmland habitats might influence weed management practices in crop fields.\nPlants from farmland populations had higher growth rates than plants from urban populations in high-competition treatments, and vice versa in low-competition treatments. Among populations, those with larger height/width ratios (farmland populations) were more tolerant of above-ground competition in high-competition treatments, but among individuals, those with larger ratios had lower growth rates in low-competition treatments. More plants from farmland populations, which had thicker stems (and larger height/width ratios), survived after experimental tillage than plants from urban habitats with thinner stems.\nSynthesis. Our study empirically demonstrated adaptive divergence in competitive traits in above-ground competitors and its underlying traits. Moreover, contemporary adaptive divergence between urban and rural plant populations has practical implications for weed control. The urban–rural model system can thus contribute to both basic and applied research in plant evolutionary ecology. Further research is required to understand adaptive divergence in plants between urban and rural environments, and the traits underlying the adaptation, not only above-ground but also below-ground.},\n\tlanguage = {en},\n\tnumber = {6},\n\turldate = {2022-04-12},\n\tjournal = {Journal of Ecology},\n\tauthor = {Fukano, Yuya and Guo, Wei and Uchida, Kei and Tachiki, Yuuya},\n\teditor = {Cornelissen, Hans},\n\tmonth = nov,\n\tyear = {2020},\n\tpages = {2521--2530},\n}\n\n
\n Competition among neighbouring plants plays essential roles in growth, reproduction, population dynamics, and community assembly, but how competition drives local adaptation and the traits underlying the adaptation remain unclear. Here, we focused on populations of the aggressive weed Digitaria ciliaris from urban and rural habitats as low- and high-competition environments for light resources and examined how competitive interaction drove contemporary adaptive divergence of competitive traits. To examine local adaptation to different competitive environments in D. ciliaris and the specific traits that have been selected for, we compared growth rate and competitive traits in plants from farmland and urban populations between high- and low-competition treatments. Furthermore, we conducted a field tillage experiment with drone (unmanned aerial vehicle) monitoring to examine the possibility that trait evolution in farmland habitats might influence weed management practices in crop fields. Plants from farmland populations had higher growth rates than plants from urban populations in high-competition treatments, and vice versa in low-competition treatments. Among populations, those with larger height/width ratios (farmland populations) were more tolerant of above-ground competition in high-competition treatments, but among individuals, those with larger ratios had lower growth rates in low-competition treatments. More plants from farmland populations, which had thicker stems (and larger height/width ratios), survived after experimental tillage than plants from urban habitats with thinner stems. Synthesis. Our study empirically demonstrated adaptive divergence in competitive traits in above-ground competitors and its underlying traits. Moreover, contemporary adaptive divergence between urban and rural plant populations has practical implications for weed control. The urban–rural model system can thus contribute to both basic and applied research in plant evolutionary ecology. Further research is required to understand adaptive divergence in plants between urban and rural environments, and the traits underlying the adaptation, not only above-ground but also below-ground.\n
Global Wheat Head Detection (GWHD) Dataset: A Large and Diverse Dataset of High-Resolution RGB-Labelled Images to Develop and Benchmark Wheat Head Detection Methods. David, E.; Madec, S.; Sadeghi-Tehran, P.; Aasen, H.; Zheng, B.; Liu, S.; Kirchgessner, N.; Ishikawa, G.; Nagasawa, K.; Badhon, M. A.; Pozniak, C.; de Solan, B.; Hund, A.; Chapman, S. C.; Baret, F.; Stavness, I.; and Guo, W. Plant Phenomics, 2020: 1–12. August 2020.
@article{david_global_2020,\n\ttitle = {Global {Wheat} {Head} {Detection} ({GWHD}) {Dataset}: {A} {Large} and {Diverse} {Dataset} of {High}-{Resolution} {RGB}-{Labelled} {Images} to {Develop} and {Benchmark} {Wheat} {Head} {Detection} {Methods}},\n\tvolume = {2020},\n\tissn = {2643-6515},\n\tshorttitle = {Global {Wheat} {Head} {Detection} ({GWHD}) {Dataset}},\n\turl = {https://spj.sciencemag.org/journals/plantphenomics/2020/3521852/},\n\tdoi = {10.34133/2020/3521852},\n\tabstract = {The detection of wheat heads in plant images is an important task for estimating pertinent wheat traits including head population density and head characteristics such as health, size, maturity stage, and the presence of awns. Several studies have developed methods for wheat head detection from high-resolution RGB imagery based on machine learning algorithms. However, these methods have generally been calibrated and validated on limited datasets. High variability in observational conditions, genotypic differences, development stages, and head orientation makes wheat head detection a challenge for computer vision. Further, possible blurring due to motion or wind and overlap between heads for dense populations make this task even more complex. Through a joint international collaborative effort, we have built a large, diverse, and well-labelled dataset of wheat images, called the Global Wheat Head Detection (GWHD) dataset. It contains 4700 high-resolution RGB images and 190000 labelled wheat heads collected from several countries around the world at different growth stages with a wide range of genotypes. Guidelines for image acquisition, associating minimum metadata to respect FAIR principles, and consistent head labelling methods are proposed when developing new head detection datasets. The GWHD dataset is publicly available at\n              http://www.global-wheat.com/and\n              aimed at developing and benchmarking methods for wheat head detection.},\n\tlanguage = {en},\n\turldate = {2022-04-12},\n\tjournal = {Plant Phenomics},\n\tauthor = {David, Etienne and Madec, Simon and Sadeghi-Tehran, Pouria and Aasen, Helge and Zheng, Bangyou and Liu, Shouyang and Kirchgessner, Norbert and Ishikawa, Goro and Nagasawa, Koichi and Badhon, Minhajul A. and Pozniak, Curtis and de Solan, Benoit and Hund, Andreas and Chapman, Scott C. and Baret, Frédéric and Stavness, Ian and Guo, Wei},\n\tmonth = aug,\n\tyear = {2020},\n\tpages = {1--12},\n}\n\n
The detection of wheat heads in plant images is an important task for estimating pertinent wheat traits including head population density and head characteristics such as health, size, maturity stage, and the presence of awns. Several studies have developed methods for wheat head detection from high-resolution RGB imagery based on machine learning algorithms. However, these methods have generally been calibrated and validated on limited datasets. High variability in observational conditions, genotypic differences, development stages, and head orientation makes wheat head detection a challenge for computer vision. Further, possible blurring due to motion or wind and overlap between heads for dense populations make this task even more complex. Through a joint international collaborative effort, we have built a large, diverse, and well-labelled dataset of wheat images, called the Global Wheat Head Detection (GWHD) dataset. It contains 4,700 high-resolution RGB images and 190,000 labelled wheat heads collected from several countries around the world at different growth stages with a wide range of genotypes. Guidelines for image acquisition, associating minimum metadata to respect FAIR principles, and consistent head labelling methods are proposed when developing new head detection datasets. The GWHD dataset is publicly available at http://www.global-wheat.com/ and is aimed at developing and benchmarking methods for wheat head detection.
Field‐based individual plant phenotyping of herbaceous species by unmanned aerial vehicle. Guo, W.; Fukano, Y.; Noshita, K.; and Ninomiya, S. Ecology and Evolution, 10(21): 12318–12326. November 2020.
@article{guo_fieldbased_2020,\n\ttitle = {Field‐based individual plant phenotyping of herbaceous species by unmanned aerial vehicle},\n\tvolume = {10},\n\tissn = {2045-7758, 2045-7758},\n\turl = {https://onlinelibrary.wiley.com/doi/10.1002/ece3.6861},\n\tdoi = {10.1002/ece3.6861},\n\tabstract = {Recent advances in Unmanned Aerial Vehicle (UAVs) and image processing have made high-throughput field phenotyping possible at plot/canopy level in the mass grown experiment. Such techniques are now expected to be used for individual level phenotyping in the single grown experiment.\nWe found two main challenges of phenotyping individual plants in the single grown experiment: plant segmentation from weedy backgrounds and the estimation of complex traits that are difficult to measure manually.\nIn this study, we proposed a methodological framework for field-based individual plant phenotyping by UAV. Two contributions, which are weed elimination for individual plant segmentation, and complex traits (volume and outline) extraction, have been developed. The framework demonstrated its utility in the phenotyping of Helianthus tuberosus (Jerusalem artichoke), an herbaceous perennial plant species.\nThe proposed framework can be applied to either small and large scale phenotyping experiments.},\n\tlanguage = {en},\n\tnumber = {21},\n\turldate = {2022-04-12},\n\tjournal = {Ecology and Evolution},\n\tauthor = {Guo, Wei and Fukano, Yuya and Noshita, Koji and Ninomiya, Seishi},\n\tmonth = nov,\n\tyear = {2020},\n\tpages = {12318--12326},\n}\n\n
Recent advances in unmanned aerial vehicles (UAVs) and image processing have made high-throughput field phenotyping possible at the plot/canopy level in mass-grown experiments. Such techniques are now expected to be used for individual-level phenotyping in single-grown experiments. We found two main challenges of phenotyping individual plants in single-grown experiments: plant segmentation from weedy backgrounds and the estimation of complex traits that are difficult to measure manually. In this study, we proposed a methodological framework for field-based individual plant phenotyping by UAV. Two contributions have been developed: weed elimination for individual plant segmentation, and extraction of complex traits (volume and outline). The framework demonstrated its utility in the phenotyping of Helianthus tuberosus (Jerusalem artichoke), an herbaceous perennial plant species. The proposed framework can be applied to both small- and large-scale phenotyping experiments.
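As a rough illustration of the segmentation step mentioned above (separating plants from a weedy background), the sketch below uses a generic excess-green (ExG) index with Otsu thresholding. This is a common baseline assumed for illustration, not the paper's exact weed-elimination procedure, and the file name is hypothetical.

import numpy as np
from skimage import io, filters

def vegetation_mask(rgb_path):
    """Boolean mask of likely vegetation pixels in a UAV RGB image."""
    rgb = io.imread(rgb_path).astype(np.float64)
    r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
    total = r + g + b + 1e-9                     # avoid division by zero
    exg = 2 * g / total - r / total - b / total  # excess green index (ExG)
    return exg > filters.threshold_otsu(exg)     # Otsu split: vegetation vs. background

# mask = vegetation_mask("uav_single_plant_plot.png")  # hypothetical file name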
Genotype‐aggregated planting improves yield in Jerusalem artichoke (Helianthus tuberosus) due to self/non‐self‐discrimination. Fukano, Y.; Guo, W.; Noshita, K.; Hashida, S.; and Kamikawa, S. Evolutionary Applications, 12(3): 508–518. March 2019.
@article{fukano_genotypeaggregated_2019,\n\ttitle = {Genotype‐aggregated planting improves yield in {Jerusalem} artichoke ( \\textit{{Helianthus} tuberosus} ) due to self/non‐self‐discrimination},\n\tvolume = {12},\n\tissn = {1752-4571, 1752-4571},\n\turl = {https://onlinelibrary.wiley.com/doi/10.1111/eva.12735},\n\tdoi = {10.1111/eva.12735},\n\tabstract = {Accumulating evidence indicates that plants are capable of self/non-self and kin/stranger discrimination. Plants increase biomass of and resource allocation to roots when they encounter roots of conspecific non-self-neighbors, but not when they encounter self roots. Root proliferation usually occurs at the expense of reproductive investment. Therefore, if clonal crops are capable of self/non-self-discrimination, spatially aggregated planting with seedlings of the same genotype may decrease root proliferation and produce a higher yield than planting without considering seedling genotype. To test this idea, we grew Helianthus tuberosus (Jerusalem artichoke) in pot and field conditions and examined self/non-self-discrimination and the effectiveness of genotype-aggregated planting. Plants grown in self pairs allocated less to root biomass than plants grown in non-self pairs in both pot and field conditions; in field conditions, the self pairs produced 40\\% more tubers by weight than the non-self pairs. When six sprouts from seed tuber of two different genotypes were grown together, with the two genotypes planted aggregately (AGG) or alternately (ALT), plants in the AGG group produced 14\\% more tubers than plants in the ALT group. These results suggest that spatial aggregation of genotypes increases tuber production in H. tuberosus. Because we found no evidence for trade-offs between root biomass and tuber production, suppression of root proliferation may not be the only mechanism behind the benefits of genotype aggregation. By applying the concept of self/non-self-discrimination, farmers can increase crop production without additional external inputs or expansion of agricultural land use.},\n\tlanguage = {en},\n\tnumber = {3},\n\turldate = {2022-04-12},\n\tjournal = {Evolutionary Applications},\n\tauthor = {Fukano, Yuya and Guo, Wei and Noshita, Koji and Hashida, Shoko and Kamikawa, Shotaka},\n\tmonth = mar,\n\tyear = {2019},\n\tpages = {508--518},\n}\n\n
\n Accumulating evidence indicates that plants are capable of self/non-self and kin/stranger discrimination. Plants increase biomass of and resource allocation to roots when they encounter roots of conspecific non-self-neighbors, but not when they encounter self roots. Root proliferation usually occurs at the expense of reproductive investment. Therefore, if clonal crops are capable of self/non-self-discrimination, spatially aggregated planting with seedlings of the same genotype may decrease root proliferation and produce a higher yield than planting without considering seedling genotype. To test this idea, we grew Helianthus tuberosus (Jerusalem artichoke) in pot and field conditions and examined self/non-self-discrimination and the effectiveness of genotype-aggregated planting. Plants grown in self pairs allocated less to root biomass than plants grown in non-self pairs in both pot and field conditions; in field conditions, the self pairs produced 40% more tubers by weight than the non-self pairs. When six sprouts from seed tuber of two different genotypes were grown together, with the two genotypes planted aggregately (AGG) or alternately (ALT), plants in the AGG group produced 14% more tubers than plants in the ALT group. These results suggest that spatial aggregation of genotypes increases tuber production in H. tuberosus. Because we found no evidence for trade-offs between root biomass and tuber production, suppression of root proliferation may not be the only mechanism behind the benefits of genotype aggregation. By applying the concept of self/non-self-discrimination, farmers can increase crop production without additional external inputs or expansion of agricultural land use.\n
Pixel size of aerial imagery constrains the applications of unmanned aerial vehicle in crop breeding. Hu, P.; Guo, W.; Chapman, S. C.; Guo, Y.; and Zheng, B. ISPRS Journal of Photogrammetry and Remote Sensing, 154: 1–9. August 2019.
@article{hu_pixel_2019,\n\ttitle = {Pixel size of aerial imagery constrains the applications of unmanned aerial vehicle in crop breeding},\n\tvolume = {154},\n\tissn = {09242716},\n\turl = {https://linkinghub.elsevier.com/retrieve/pii/S0924271619301303},\n\tdoi = {10.1016/j.isprsjprs.2019.05.008},\n\tabstract = {Image analysis using proximal sensors can help accelerate the selection process in plant breeding and improve the breeding efficiency. However, the accuracies of extracted phenotypic traits, especially those that require image classification, are affected by the pixel size in images. Ground coverage (GC), the ratio of projected to ground vegetation area to total land area, is a simple and important trait to monitor crop growth and development and is often captured by visual-spectrum cameras on multiple platforms from ground-based vehicles to satellites. In this study, we used GC as an example trait and explored its dependency on pixel size. In developing new spring wheat varieties, breeders often aim for rapid GC estimation, which is challenging especially when coverage is low ({\\textless}25\\%) in a species with thin leaves (ranging from 2 to 15 mm across). In a wheat trial comprising 28 treatments, high-resolution images were manually taken at ca. 1 m above canopies on seven occasions from emergence to flowering. Using a cubic interpolation algorithm, the original images with small pixel size were degraded into coarse images with large pixel size (from 0.1 to 5.0 cm per pixel, 26 extra levels in total) to mimic the image acquisition at different flight heights of an unmanned aerial vehicle (UAV) based platform. A machine learning based classification model was used to classify pixels of the original images and the corresponding degraded images into either vegetation and background classes, and then computed their GCs. GCs of original images were referred as reference values to their corresponding degraded images. As pixel size increased, GC of the degraded images tended to be underestimated when reference GC was less than about 50\\% and overestimated for GC {\\textgreater} 50\\%. The greatest errors (about 30\\%) were observed when reference GCs were around 30\\% and 70\\%. Meanwhile, the largest pixel sizes to distinguish between two treatments depended on the difference between GCs of the two treatments and were rapidly increased when differences were greater than the specific values at given significance levels (i.e. about 10\\%, 8\\% and 6\\% for P {\\textless} 0.01, 0.05 and 0.1, respectively). For wheat, small pixel size (e.g. {\\textless}0.1 cm) is always required to accurately estimate ground coverage when the most practical flight height is about 20 to 30 m at present. This study provides a guideline to choose appropriate pixel sizes and flight plans to estimate GC and other traits in crop breeding using UAV based HTP platforms.},\n\tlanguage = {en},\n\turldate = {2022-04-12},\n\tjournal = {ISPRS Journal of Photogrammetry and Remote Sensing},\n\tauthor = {Hu, Pengcheng and Guo, Wei and Chapman, Scott C. and Guo, Yan and Zheng, Bangyou},\n\tmonth = aug,\n\tyear = {2019},\n\tpages = {1--9},\n}\n\n
Image analysis using proximal sensors can help accelerate the selection process in plant breeding and improve breeding efficiency. However, the accuracies of extracted phenotypic traits, especially those that require image classification, are affected by the pixel size of the images. Ground coverage (GC), the ratio of ground-projected vegetation area to total land area, is a simple and important trait for monitoring crop growth and development and is often captured by visual-spectrum cameras on multiple platforms, from ground-based vehicles to satellites. In this study, we used GC as an example trait and explored its dependency on pixel size. In developing new spring wheat varieties, breeders often aim for rapid GC estimation, which is challenging especially when coverage is low (<25%) in a species with thin leaves (ranging from 2 to 15 mm across). In a wheat trial comprising 28 treatments, high-resolution images were manually taken at ca. 1 m above canopies on seven occasions from emergence to flowering. Using a cubic interpolation algorithm, the original images with small pixel size were degraded into coarse images with large pixel size (from 0.1 to 5.0 cm per pixel, 26 extra levels in total) to mimic image acquisition at different flight heights of an unmanned aerial vehicle (UAV) based platform. A machine learning based classification model was used to classify pixels of the original images and the corresponding degraded images into vegetation and background classes, and their GCs were then computed. GCs of the original images served as reference values for their corresponding degraded images. As pixel size increased, GC of the degraded images tended to be underestimated when the reference GC was less than about 50% and overestimated when GC was greater than 50%. The greatest errors (about 30%) were observed when reference GCs were around 30% and 70%. Meanwhile, the largest pixel sizes able to distinguish between two treatments depended on the difference between the GCs of the two treatments and increased rapidly when differences were greater than specific values at given significance levels (i.e., about 10%, 8% and 6% for P < 0.01, 0.05 and 0.1, respectively). For wheat, a small pixel size (e.g., <0.1 cm) is always required to accurately estimate ground coverage when the most practical flight height is about 20 to 30 m at present. This study provides a guideline for choosing appropriate pixel sizes and flight plans to estimate GC and other traits in crop breeding using UAV-based HTP platforms.
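The degradation experiment described above can be imitated in a few lines. The sketch below resamples an image to a coarser pixel size with cubic interpolation and recomputes ground coverage; the green-dominance rule standing in for the paper's machine-learning classifier, and the file name, are assumptions for illustration.

import cv2
import numpy as np

def ground_coverage(bgr):
    """GC = vegetation pixels / all pixels, with a crude green-dominance rule
    standing in for the paper's trained classifier."""
    b = bgr[..., 0].astype(np.int32)
    g = bgr[..., 1].astype(np.int32)
    r = bgr[..., 2].astype(np.int32)
    return float(((g > r) & (g > b)).mean())

def degraded_gc(bgr, original_cm_per_px, target_cm_per_px):
    """Resample to a coarser pixel size with cubic interpolation, then recompute GC."""
    scale = original_cm_per_px / target_cm_per_px    # e.g. 0.1 cm -> 2.0 cm per pixel
    coarse = cv2.resize(bgr, None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
    return ground_coverage(coarse)

# img = cv2.imread("wheat_plot.jpg")                 # hypothetical file name
# print(ground_coverage(img), degraded_gc(img, 0.1, 2.0))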
A Weakly Supervised Deep Learning Framework for Sorghum Head Detection and Counting. Ghosal, S.; Zheng, B.; Chapman, S. C.; Potgieter, A. B.; Jordan, D. R.; Wang, X.; Singh, A. K.; Singh, A.; Hirafuji, M.; Ninomiya, S.; Ganapathysubramanian, B.; Sarkar, S.; and Guo, W. Plant Phenomics, 2019: 1–14. June 2019.
@article{ghosal_weakly_2019,\n\ttitle = {A {Weakly} {Supervised} {Deep} {Learning} {Framework} for {Sorghum} {Head} {Detection} and {Counting}},\n\tvolume = {2019},\n\tissn = {2643-6515},\n\turl = {https://spj.sciencemag.org/journals/plantphenomics/2019/1525874/},\n\tdoi = {10.34133/2019/1525874},\n\tabstract = {The yield of cereal crops such as sorghum (\n              Sorghum bicolor\n              L. Moench) depends on the distribution of crop-heads in varying branching arrangements. Therefore, counting the head number per unit area is critical for plant breeders to correlate with the genotypic variation in a specific breeding field. However, measuring such phenotypic traits manually is an extremely labor-intensive process and suffers from low efficiency and human errors. Moreover, the process is almost infeasible for large-scale breeding plantations or experiments. Machine learning-based approaches like deep convolutional neural network (CNN) based object detectors are promising tools for efficient object detection and counting. However, a significant limitation of such deep learning-based approaches is that they typically require a massive amount of hand-labeled images for training, which is still a tedious process. Here, we propose an active learning inspired weakly supervised deep learning framework for sorghum head detection and counting from UAV-based images. We demonstrate that it is possible to significantly reduce human labeling effort without compromising final model performance (\n              \n                \n                  \n                    \n                      R\n                    \n                    \n                      2\n                    \n                  \n                \n              \n              between human count and machine count is 0.88) by using a semitrained CNN model (i.e., trained with limited labeled data) to perform synthetic annotation. In addition, we also visualize key features that the network learns. This improves trustworthiness by enabling users to better understand and trust the decisions that the trained deep learning model makes.},\n\tlanguage = {en},\n\turldate = {2022-04-12},\n\tjournal = {Plant Phenomics},\n\tauthor = {Ghosal, Sambuddha and Zheng, Bangyou and Chapman, Scott C. and Potgieter, Andries B. and Jordan, David R. and Wang, Xuemin and Singh, Asheesh K. and Singh, Arti and Hirafuji, Masayuki and Ninomiya, Seishi and Ganapathysubramanian, Baskar and Sarkar, Soumik and Guo, Wei},\n\tmonth = jun,\n\tyear = {2019},\n\tpages = {1--14},\n}\n\n
The yield of cereal crops such as sorghum (Sorghum bicolor L. Moench) depends on the distribution of crop-heads in varying branching arrangements. Therefore, counting the head number per unit area is critical for plant breeders to correlate with the genotypic variation in a specific breeding field. However, measuring such phenotypic traits manually is an extremely labor-intensive process and suffers from low efficiency and human errors. Moreover, the process is almost infeasible for large-scale breeding plantations or experiments. Machine learning-based approaches like deep convolutional neural network (CNN) based object detectors are promising tools for efficient object detection and counting. However, a significant limitation of such deep learning-based approaches is that they typically require a massive amount of hand-labeled images for training, which is still a tedious process. Here, we propose an active learning inspired weakly supervised deep learning framework for sorghum head detection and counting from UAV-based images. We demonstrate that it is possible to significantly reduce human labeling effort without compromising final model performance (R2 between human count and machine count is 0.88) by using a semitrained CNN model (i.e., trained with limited labeled data) to perform synthetic annotation. In addition, we also visualize key features that the network learns. This improves trustworthiness by enabling users to better understand and trust the decisions that the trained deep learning model makes.
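The agreement statistic quoted above, the coefficient of determination between human and machine head counts, can be computed as in the sketch below; the per-image counts in the usage comment are invented for illustration.

import numpy as np

def r_squared(human_counts, machine_counts):
    """Coefficient of determination, treating machine counts as predictions of human counts."""
    y = np.asarray(human_counts, dtype=float)
    yhat = np.asarray(machine_counts, dtype=float)
    ss_res = np.sum((y - yhat) ** 2)
    ss_tot = np.sum((y - y.mean()) ** 2)
    return 1.0 - ss_res / ss_tot

# print(r_squared([12, 30, 25, 40], [13, 28, 26, 41]))   # counts invented for illustration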
Automatic estimation of heading date of paddy rice using deep learning. Desai, S. V.; Balasubramanian, V. N.; Fukatsu, T.; Ninomiya, S.; and Guo, W. Plant Methods, 15(1): 76. December 2019.
@article{desai_automatic_2019,\n\ttitle = {Automatic estimation of heading date of paddy rice using deep learning},\n\tvolume = {15},\n\tissn = {1746-4811},\n\turl = {https://plantmethods.biomedcentral.com/articles/10.1186/s13007-019-0457-1},\n\tdoi = {10.1186/s13007-019-0457-1},\n\tabstract = {Background\nAccurate estimation of heading date of paddy rice greatly helps the breeders to understand the adaptability of different crop varieties in a given location. The heading date also plays a vital role in determining grain yield for research experiments. Visual examination of the crop is laborious and time consuming. Therefore, quick and precise estimation of heading date of paddy rice is highly essential.\n\nResults\nIn this work, we propose a simple pipeline to detect regions containing flowering panicles from ground level RGB images of paddy rice. Given a fixed region size for an image, the number of regions containing flowering panicles is directly proportional to the number of flowering panicles present. Consequently, we use the flowering panicle region counts to estimate the heading date of the crop. The method is based on image classification using Convolutional Neural Networks. We evaluated the performance of our algorithm on five time series image sequences of three different varieties of rice crops. When compared to the previous work on this dataset, the accuracy and general versatility of the method has been improved and heading date has been estimated with a mean absolute error of less than 1 day.\n\nConclusion\nAn efficient heading date estimation method has been described for rice crops using time series RGB images of crop under natural field conditions. This study demonstrated that our method can reliably be used as a replacement of manual observation to detect the heading date of rice crops.},\n\tlanguage = {en},\n\tnumber = {1},\n\turldate = {2022-04-12},\n\tjournal = {Plant Methods},\n\tauthor = {Desai, Sai Vikas and Balasubramanian, Vineeth N. and Fukatsu, Tokihiro and Ninomiya, Seishi and Guo, Wei},\n\tmonth = dec,\n\tyear = {2019},\n\tpages = {76},\n}\n\n
Background: Accurate estimation of heading date of paddy rice greatly helps the breeders to understand the adaptability of different crop varieties in a given location. The heading date also plays a vital role in determining grain yield for research experiments. Visual examination of the crop is laborious and time consuming. Therefore, quick and precise estimation of heading date of paddy rice is highly essential. Results: In this work, we propose a simple pipeline to detect regions containing flowering panicles from ground level RGB images of paddy rice. Given a fixed region size for an image, the number of regions containing flowering panicles is directly proportional to the number of flowering panicles present. Consequently, we use the flowering panicle region counts to estimate the heading date of the crop. The method is based on image classification using Convolutional Neural Networks. We evaluated the performance of our algorithm on five time series image sequences of three different varieties of rice crops. When compared to the previous work on this dataset, the accuracy and general versatility of the method has been improved and heading date has been estimated with a mean absolute error of less than 1 day. Conclusion: An efficient heading date estimation method has been described for rice crops using time series RGB images of crop under natural field conditions. This study demonstrated that our method can reliably be used as a replacement of manual observation to detect the heading date of rice crops.
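A minimal sketch of the counting idea described above: tile an image into fixed-size regions, score each region with a binary CNN classifier (flowering panicle present or not), and sum the positive regions. The classifier object, tile size and threshold below are hypothetical placeholders, not the authors' trained network.

import torch

def count_flowering_regions(image, classifier, tile=224, threshold=0.5):
    """image: CxHxW float tensor; classifier: hypothetical model returning one logit
    per region ("flowering panicle present"). Returns the positive-region count."""
    _, h, w = image.shape
    count = 0
    for y in range(0, h - tile + 1, tile):
        for x in range(0, w - tile + 1, tile):
            region = image[:, y:y + tile, x:x + tile].unsqueeze(0)
            with torch.no_grad():
                prob = torch.sigmoid(classifier(region)).item()
            count += int(prob >= threshold)
    return count

Tracking this count per acquisition date gives the flowering signal from which the heading date is read off.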
Easy MPE: Extraction of Quality Microplot Images for UAV-Based High-Throughput Field Phenotyping. Tresch, L.; Mu, Y.; Itoh, A.; Kaga, A.; Taguchi, K.; Hirafuji, M.; Ninomiya, S.; and Guo, W. Plant Phenomics, 2019: 1–9. November 2019.
@article{tresch_easy_2019,\n\ttitle = {Easy {MPE}: {Extraction} of {Quality} {Microplot} {Images} for {UAV}-{Based} {High}-{Throughput} {Field} {Phenotyping}},\n\tvolume = {2019},\n\tissn = {2643-6515},\n\tshorttitle = {Easy {MPE}},\n\turl = {https://spj.sciencemag.org/journals/plantphenomics/2019/2591849/},\n\tdoi = {10.34133/2019/2591849},\n\tabstract = {Microplot extraction (PE) is a necessary image processing step in unmanned aerial vehicle- (UAV-) based research on breeding fields. At present, it is manually using ArcGIS, QGIS, or other GIS-based software, but achieving the desired accuracy is time-consuming. We therefore developed an intuitive, easy-to-use semiautomatic program for MPE called Easy MPE to enable researchers and others to access reliable plot data UAV images of whole fields under variable field conditions. The program uses four major steps: (1) binary segmentation, (2) microplot extraction, (3) production of\n              \n                ∗\n              \n              .shp files to enable further file manipulation, and (4) projection of individual microplots generated from the orthomosaic back onto the raw aerial UAV images to preserve the image quality. Crop rows were successfully identified in all trial fields. The performance of the proposed method was evaluated by calculating the intersection-over-union (IOU) ratio between microplots determined manually and by Easy MPE: the average IOU (±SD) of all trials was 91\\% (±3).},\n\tlanguage = {en},\n\turldate = {2022-04-12},\n\tjournal = {Plant Phenomics},\n\tauthor = {Tresch, Léa and Mu, Yue and Itoh, Atsushi and Kaga, Akito and Taguchi, Kazunori and Hirafuji, Masayuki and Ninomiya, Seishi and Guo, Wei},\n\tmonth = nov,\n\tyear = {2019},\n\tpages = {1--9},\n}\n\n
Microplot extraction (MPE) is a necessary image processing step in unmanned aerial vehicle (UAV) based research on breeding fields. At present, it is performed manually using ArcGIS, QGIS, or other GIS-based software, and achieving the desired accuracy is time-consuming. We therefore developed an intuitive, easy-to-use semiautomatic program for MPE called Easy MPE to enable researchers and others to access reliable plot data from UAV images of whole fields under variable field conditions. The program uses four major steps: (1) binary segmentation, (2) microplot extraction, (3) production of *.shp files to enable further file manipulation, and (4) projection of individual microplots generated from the orthomosaic back onto the raw aerial UAV images to preserve the image quality. Crop rows were successfully identified in all trial fields. The performance of the proposed method was evaluated by calculating the intersection-over-union (IOU) ratio between microplots determined manually and by Easy MPE: the average IOU (±SD) of all trials was 91% (±3%).
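The evaluation metric used above, intersection over union between a manually digitized microplot and the extracted one, reduces to a few lines for axis-aligned boxes; the example boxes in the comment are hypothetical.

def iou(box_a, box_b):
    """Intersection over union of two axis-aligned boxes (xmin, ymin, xmax, ymax)."""
    ax1, ay1, ax2, ay2 = box_a
    bx1, by1, bx2, by2 = box_b
    iw = max(0.0, min(ax2, bx2) - max(ax1, bx1))
    ih = max(0.0, min(ay2, by2) - max(ay1, by1))
    inter = iw * ih
    union = (ax2 - ax1) * (ay2 - ay1) + (bx2 - bx1) * (by2 - by1) - inter
    return inter / union if union > 0 else 0.0

# print(iou((0, 0, 10, 4), (1, 0, 10, 4)))  # hypothetical plot footprints -> 0.9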
McGET: A rapid image-based method to determine the morphological characteristics of gravels on the Gobi desert surface. Mu, Y.; Wang, F.; Zheng, B.; Guo, W.; and Feng, Y. Geomorphology, 304: 89–98. March 2018.
@article{mu_mcget_2018,\n\ttitle = {{McGET}: {A} rapid image-based method to determine the morphological characteristics of gravels on the {Gobi} desert surface},\n\tvolume = {304},\n\tissn = {0169-555X},\n\tshorttitle = {{McGET}},\n\turl = {https://www.sciencedirect.com/science/article/pii/S0169555X17305354},\n\tdoi = {10.1016/j.geomorph.2017.12.027},\n\tabstract = {The relationship between morphological characteristics (e.g. gravel size, coverage, angularity and orientation) and local geomorphic features (e.g. slope gradient and aspect) of desert has been used to explore the evolution process of Gobi desert. Conventional quantification methods are time-consuming, inefficient and even prove impossible to determine the characteristics of large numbers of gravels. We propose a rapid image-based method to obtain the morphological characteristics of gravels on the Gobi desert surface, which is called the “morphological characteristics gained effectively technique” (McGET). The image of the Gobi desert surface was classified into gravel clusters and background by a machine-learning “classification and regression tree” (CART) algorithm. Then gravel clusters were segmented into individual gravel clasts by separating objects in images using a “watershed segmentation” algorithm. Thirdly, gravel coverage, diameter, aspect ratio and orientation were calculated based on the basic principles of 2D computer graphics. We validated this method with two independent datasets in which the gravel morphological characteristics were obtained from 2728 gravels measured in the field and 7422 gravels measured by manual digitization. Finally, we applied McGET to derive the spatial variation of gravel morphology on the Gobi desert along an alluvial-proluvial fan located in Hami, Xinjiang, China. The validated results show that the mean gravel diameter measured in the field agreed well with that calculated by McGET for large gravels (R2=0.89, P{\\textless}0.001). Compared to manual digitization, the McGET accuracies for gravel coverage, gravel diameter and aspect ratio were 97\\%, 83\\% and 96\\%, respectively. The orientation distributions calculated were consistent across two different methods. More importantly, McGET significantly shortens the time cost in obtaining gravel morphological characteristics in the field and laboratory. The spatial variation results show that the gravel coverage ranged from 88\\% to 65\\%, the gravel diameter was unimodally distributed and ranged from 19mm to 13mm. Most gravels were bladed or rod-like, with a mean aspect ratio of 1.57, and had no preferred orientation on the surveyed Gobi desert. From the center to the edge of the fan, gravel coverage decreased 2.2\\% per 100m elevation decrease (R2=0.69, P{\\textless}0.001), mean gravel diameter decreased 0.5mm per 100m elevation decrease (R2=0.52, P{\\textless}0.001), and mean aspect ratio slightly increased 0.004 per 100m elevation decrease (R2=0.26, P{\\textless}0.05). These results imply that surface washing was the main process on the investigated Gobi desert. 
This study demonstrates that the new method can quickly and accurately calculate the gravel coverage, diameter, aspect ratio and orientation from the images of Gobi desert.},\n\tlanguage = {en},\n\turldate = {2022-04-12},\n\tjournal = {Geomorphology},\n\tauthor = {Mu, Yue and Wang, Feng and Zheng, Bangyou and Guo, Wei and Feng, Yiming},\n\tmonth = mar,\n\tyear = {2018},\n\tkeywords = {Classification and regression tree (CART), Desert pavement, Gravel coverage, Gravel morphology, Image analysis},\n\tpages = {89--98},\n}\n\n
The relationship between morphological characteristics (e.g. gravel size, coverage, angularity and orientation) and local geomorphic features (e.g. slope gradient and aspect) of deserts has been used to explore the evolution of the Gobi desert. Conventional quantification methods are time-consuming, inefficient and even prove impossible for determining the characteristics of large numbers of gravels. We propose a rapid image-based method to obtain the morphological characteristics of gravels on the Gobi desert surface, called the "morphological characteristics gained effectively technique" (McGET). The image of the Gobi desert surface was classified into gravel clusters and background by a machine-learning "classification and regression tree" (CART) algorithm. Gravel clusters were then segmented into individual gravel clasts by separating objects in images with a "watershed segmentation" algorithm. Thirdly, gravel coverage, diameter, aspect ratio and orientation were calculated based on the basic principles of 2D computer graphics. We validated this method with two independent datasets in which the gravel morphological characteristics were obtained from 2728 gravels measured in the field and 7422 gravels measured by manual digitization. Finally, we applied McGET to derive the spatial variation of gravel morphology on the Gobi desert along an alluvial-proluvial fan located in Hami, Xinjiang, China. The validation results show that the mean gravel diameter measured in the field agreed well with that calculated by McGET for large gravels (R2 = 0.89, P < 0.001). Compared to manual digitization, the McGET accuracies for gravel coverage, gravel diameter and aspect ratio were 97%, 83% and 96%, respectively. The orientation distributions calculated were consistent across the two methods. More importantly, McGET significantly shortens the time needed to obtain gravel morphological characteristics in the field and laboratory. The spatial variation results show that gravel coverage ranged from 88% to 65%, and gravel diameter was unimodally distributed and ranged from 19 mm to 13 mm. Most gravels were bladed or rod-like, with a mean aspect ratio of 1.57, and had no preferred orientation on the surveyed Gobi desert. From the center to the edge of the fan, gravel coverage decreased by 2.2% per 100 m decrease in elevation (R2 = 0.69, P < 0.001), mean gravel diameter decreased by 0.5 mm per 100 m decrease in elevation (R2 = 0.52, P < 0.001), and mean aspect ratio increased slightly, by 0.004 per 100 m decrease in elevation (R2 = 0.26, P < 0.05). These results imply that surface washing was the main process on the investigated Gobi desert. This study demonstrates that the new method can quickly and accurately calculate gravel coverage, diameter, aspect ratio and orientation from images of the Gobi desert.
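A minimal sketch of the segmentation-and-measurement stage described above, assuming a binary gravel mask has already been produced (the paper obtains it with a CART classifier): individual clasts are separated with marker-controlled watershed on a distance transform, and their diameter, aspect ratio and orientation are read from region properties. Parameter values such as min_distance are illustrative, not the paper's settings.

import numpy as np
from scipy import ndimage as ndi
from skimage.feature import peak_local_max
from skimage.measure import label, regionprops
from skimage.segmentation import watershed

def gravel_morphology(gravel_mask):
    """gravel_mask: boolean array, True where gravel. Returns coverage and per-clast stats."""
    distance = ndi.distance_transform_edt(gravel_mask)
    peaks = peak_local_max(distance, min_distance=5, labels=label(gravel_mask))
    markers = np.zeros(distance.shape, dtype=int)
    markers[tuple(peaks.T)] = np.arange(1, len(peaks) + 1)
    clasts = watershed(-distance, markers, mask=gravel_mask)   # marker-controlled watershed
    stats = [{
        "diameter": r.equivalent_diameter,
        "aspect_ratio": r.major_axis_length / max(r.minor_axis_length, 1e-9),
        "orientation": r.orientation,
    } for r in regionprops(clasts)]
    return float(gravel_mask.mean()), stats                    # coverage, clast properties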
Development of Data Distribution and Display Methods for Time Series Aerial Drone Images in International Image Interoperability Framework. Itoh, A.; Guo, W.; Taguchi, K.; and Hirafuji, M. Agricultural Information Research, 27(2): 28–38. June 2018.
@article{itoh_development_2018,\n\ttitle = {Development of {Data} {Distribution} and {Display} {Methods} for {Time} {Series} {Aerial} {Drone} {Images} in {International} {Image} {Interoperability} {Framework}},\n\tvolume = {27},\n\tdoi = {10.3173/air.27.28},\n\tabstract = {In recent years, aerial drones have been increasingly used for field phenotyping of crop research fields: photographs taken at low altitude are combined in an orthomosaic with positional information and stored in GeoTIFF format. Many GeoTIFF files are too large to allow easy display in image viewer software, or to distribute and browse on the Web. To overcome this limitation, we developed a method to distribute high-resolution orthomosaic images on the Web using the International Image Interoperability Framework (IIIF). By converting GeoTIFF files to map tiles in advance, this method allows the smooth delivery of images according to IIIF specifications. We show the high versatility of this method by using an existing IIIF-compatible image viewer to browse converted images. We also show its high flexibility by implementing an original image viewer using an existing IIIF-compatible library.},\n\tnumber = {2},\n\tjournal = {Agricultural Information Research},\n\tauthor = {Itoh, Atsushi and Guo, Wei and Taguchi, Kazunori and Hirafuji, Masayuki},\n\tmonth = jun,\n\tyear = {2018},\n\tkeywords = {GeoTIFF, IIIF, high-resolution image, image viewer, map tile, time-series data, 地図タイル, 時系列データ, 画像ビューア, 高解像度画像},\n\tpages = {28--38},\n}\n\n
\n In recent years, aerial drones have been increasingly used for field phenotyping of crop research fields: photographs taken at low altitude are combined in an orthomosaic with positional information and stored in GeoTIFF format. Many GeoTIFF files are too large to allow easy display in image viewer software, or to distribute and browse on the Web. To overcome this limitation, we developed a method to distribute high-resolution orthomosaic images on the Web using the International Image Interoperability Framework (IIIF). By converting GeoTIFF files to map tiles in advance, this method allows the smooth delivery of images according to IIIF specifications. We show the high versatility of this method by using an existing IIIF-compatible image viewer to browse converted images. We also show its high flexibility by implementing an original image viewer using an existing IIIF-compatible library.\n
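For orientation, the sketch below assembles a request URL following the general IIIF Image API pattern (identifier/region/size/rotation/quality.format) that an IIIF-compatible tile server answers; the server address and image identifier are hypothetical, and this is not the viewer or conversion code developed in the paper.

from urllib.parse import quote

def iiif_image_url(server, identifier, region="full", size="max",
                   rotation="0", quality="default", fmt="jpg"):
    """Build an IIIF Image API request: {server}/{id}/{region}/{size}/{rotation}/{quality}.{format}"""
    return (f"{server}/{quote(identifier, safe='')}/"
            f"{region}/{size}/{rotation}/{quality}.{fmt}")

# A 1024-px-wide preview of one (hypothetical) orthomosaic published through IIIF:
# iiif_image_url("https://example.org/iiif", "field_2018-06-01_ortho",
#                region="0,0,4096,4096", size="1024,")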
Characterization of peach tree crown by using high-resolution images from an unmanned aerial vehicle. Mu, Y.; Fujii, Y.; Takata, D.; Zheng, B.; Noshita, K.; Honda, K.; Ninomiya, S.; and Guo, W. Horticulture Research, 5: 74. January 2018.
@article{mu_characterization_2018,\n\ttitle = {Characterization of peach tree crown by using high-resolution images from an unmanned aerial vehicle},\n\tvolume = {5},\n\tissn = {2052-7276},\n\turl = {https://doi.org/10.1038/s41438-018-0097-z},\n\tdoi = {10.1038/s41438-018-0097-z},\n\tabstract = {In orchards, measuring crown characteristics is essential for monitoring the dynamics of tree growth and optimizing farm management. However, it lacks a rapid and reliable method of extracting the features of trees with an irregular crown shape such as trained peach trees. Here, we propose an efficient method of segmenting the individual trees and measuring the crown width and crown projection area (CPA) of peach trees with time-series information, based on gathered images. The images of peach trees were collected by unmanned aerial vehicles in an orchard in Okayama, Japan, and then the digital surface model was generated by using a Structure from Motion (SfM) and Multi-View Stereo (MVS) based software. After individual trees were identified through the use of an adaptive threshold and marker-controlled watershed segmentation in the digital surface model, the crown widths and CPA were calculated, and the accuracy was evaluated against manual delineation and field measurement, respectively. Taking manual delineation of 12 trees as reference, the root-mean-square errors of the proposed method were 0.08 m (R2 = 0.99) and 0.15 m (R2 = 0.93) for the two orthogonal crown widths, and 3.87 m2 for CPA (R2 = 0.89), while those taking field measurement of 44 trees as reference were 0.47 m (R2 = 0.91), 0.51 m (R2 = 0.74), and 4.96 m2 (R2 = 0.88). The change of growth rate of CPA showed that the peach trees grew faster from May to July than from July to September, with a wide variation in relative growth rates among trees. Not only can this method save labour by replacing field measurement, but also it can allow farmers to monitor the growth of orchard trees dynamically.},\n\turldate = {2022-04-12},\n\tjournal = {Horticulture Research},\n\tauthor = {Mu, Yue and Fujii, Yuichiro and Takata, Daisuke and Zheng, Bangyou and Noshita, Koji and Honda, Kiyoshi and Ninomiya, Seishi and Guo, Wei},\n\tmonth = jan,\n\tyear = {2018},\n\tpages = {74},\n}\n\n
In orchards, measuring crown characteristics is essential for monitoring the dynamics of tree growth and optimizing farm management. However, a rapid and reliable method is lacking for extracting the features of trees with an irregular crown shape, such as trained peach trees. Here, we propose an efficient method of segmenting individual trees and measuring the crown width and crown projection area (CPA) of peach trees with time-series information, based on gathered images. Images of peach trees were collected by unmanned aerial vehicles in an orchard in Okayama, Japan, and a digital surface model was then generated using Structure from Motion (SfM) and Multi-View Stereo (MVS) based software. After individual trees were identified through the use of an adaptive threshold and marker-controlled watershed segmentation in the digital surface model, the crown widths and CPA were calculated, and the accuracy was evaluated against manual delineation and field measurement, respectively. Taking manual delineation of 12 trees as reference, the root-mean-square errors of the proposed method were 0.08 m (R2 = 0.99) and 0.15 m (R2 = 0.93) for the two orthogonal crown widths, and 3.87 m2 for CPA (R2 = 0.89), while those taking field measurement of 44 trees as reference were 0.47 m (R2 = 0.91), 0.51 m (R2 = 0.74), and 4.96 m2 (R2 = 0.88). The change in the growth rate of CPA showed that the peach trees grew faster from May to July than from July to September, with wide variation in relative growth rates among trees. Not only can this method save labour by replacing field measurement, but it can also allow farmers to monitor the growth of orchard trees dynamically.
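The two crown measurements reported above can be derived from a per-tree crown mask and the ground sampling distance (GSD); the sketch below assumes the segmentation itself (adaptive threshold plus marker-controlled watershed on the DSM) has already produced the mask, and the array names are hypothetical.

import numpy as np

def crown_metrics(crown_mask, gsd_m):
    """crown_mask: boolean mask of one tree crown; gsd_m: ground sampling distance (m/pixel)."""
    ys, xs = np.nonzero(crown_mask)
    width_x = (xs.max() - xs.min() + 1) * gsd_m      # crown width along image x (m)
    width_y = (ys.max() - ys.min() + 1) * gsd_m      # crown width along image y (m)
    cpa = crown_mask.sum() * gsd_m ** 2              # crown projection area (m^2)
    return width_x, width_y, cpa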
Aerial Imagery Analysis – Quantifying Appearance and Number of Sorghum Heads for Applications in Breeding and Agronomy. Guo, W.; Zheng, B.; Potgieter, A. B.; Diot, J.; Watanabe, K.; Noshita, K.; Jordan, D. R.; Wang, X.; Watson, J.; Ninomiya, S.; and Chapman, S. C. Frontiers in Plant Science, 9. October 2018.
@article{guo_aerial_2018,\n\ttitle = {Aerial {Imagery} {Analysis} – {Quantifying} {Appearance} and {Number} of {Sorghum} {Heads} for {Applications} in {Breeding} and {Agronomy}},\n\tvolume = {9},\n\tissn = {1664-462X},\n\turl = {https://www.frontiersin.org/article/10.3389/fpls.2018.01544},\n\tdoi = {10.3389/fpls.2018.01544},\n\tabstract = {Sorghum (Sorghum bicolor L. Moench) is a C4 tropical grass that plays an essential role in providing nutrition to humans and livestock, particularly in marginal rainfall environments. The timing of head development and the number of heads per unit area are key adaptation traits to consider in agronomy and breeding but are time consuming and labor intensive to measure. We propose a two-step machine-based image processing method to detect and count the number of heads from high-resolution images captured by unmanned aerial vehicles (UAVs) in a breeding trial. To demonstrate the performance of the proposed method, 52 images were manually labeled; the precision and recall of head detection were 0.87 and 0.98, respectively, and the coefficient of determination (R2) between the manual and new methods of counting was 0.84. To verify the utility of the method in breeding programs, a geolocation-based plot segmentation method was applied to pre-processed ortho-mosaic images to extract {\\textgreater}1000 plots from original RGB images. Forty of these plots were randomly selected and labeled manually; the precision and recall of detection were 0.82 and 0.98, respectively, and the coefficient of determination between manual and algorithm counting was 0.56, with the major source of error being related to the morphology of plants resulting in heads being displayed both within and outside the plot in which the plants were sown, i.e., being allocated to a neighboring plot. Finally, the potential applications in yield estimation from UAV-based imagery from agronomy experiments and scouting of production fields are also discussed.},\n\turldate = {2022-04-12},\n\tjournal = {Frontiers in Plant Science},\n\tauthor = {Guo, Wei and Zheng, Bangyou and Potgieter, Andries B. and Diot, Julien and Watanabe, Kakeru and Noshita, Koji and Jordan, David R. and Wang, Xuemin and Watson, James and Ninomiya, Seishi and Chapman, Scott C.},\n\tmonth = oct,\n\tyear = {2018},\n}\n\n
Sorghum (Sorghum bicolor L. Moench) is a C4 tropical grass that plays an essential role in providing nutrition to humans and livestock, particularly in marginal rainfall environments. The timing of head development and the number of heads per unit area are key adaptation traits to consider in agronomy and breeding but are time consuming and labor intensive to measure. We propose a two-step machine-based image processing method to detect and count the number of heads from high-resolution images captured by unmanned aerial vehicles (UAVs) in a breeding trial. To demonstrate the performance of the proposed method, 52 images were manually labeled; the precision and recall of head detection were 0.87 and 0.98, respectively, and the coefficient of determination (R2) between the manual and new methods of counting was 0.84. To verify the utility of the method in breeding programs, a geolocation-based plot segmentation method was applied to pre-processed ortho-mosaic images to extract >1000 plots from original RGB images. Forty of these plots were randomly selected and labeled manually; the precision and recall of detection were 0.82 and 0.98, respectively, and the coefficient of determination between manual and algorithm counting was 0.56, with the major source of error being related to the morphology of plants resulting in heads being displayed both within and outside the plot in which the plants were sown, i.e., being allocated to a neighboring plot. Finally, the potential applications in yield estimation from UAV-based imagery from agronomy experiments and scouting of production fields are also discussed.
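The detection scores quoted above are standard precision and recall, computed after matching detected heads to manual labels; a minimal sketch follows, with counts in the usage comment invented purely for illustration.

def precision_recall(tp, fp, fn):
    """tp: matched heads, fp: spurious detections, fn: missed heads."""
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    return precision, recall

# precision_recall(87, 13, 2)   # invented counts -> (0.87, ~0.98)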
Tutorial: image sensing techniques for field phenotyping. Watanabe, K.; and Guo, W. Breeding Research, 20(1): 64–68. June 2018.
@article{watanabe_tutorial_2018,\n\ttitle = {Tutorial: image sensing techniques for field phenotyping},\n\tvolume = {20},\n\tissn = {1344-7629, 1348-1290},\n\tshorttitle = {Tutorial},\n\turl = {https://www.jstage.jst.go.jp/article/jsbbr/20/1/20_20.W03/_article/-char/ja/},\n\tdoi = {10.1270/jsbbr.20.W03},\n\tlanguage = {en},\n\tnumber = {1},\n\turldate = {2022-04-12},\n\tjournal = {Breeding Research},\n\tauthor = {Watanabe, Kakeru and Guo, Wei},\n\tmonth = jun,\n\tyear = {2018},\n\tpages = {64--68},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n High-Throughput Phenotyping of Sorghum Plant Height Using an Unmanned Aerial Vehicle and Its Application to Genomic Prediction Modeling.\n \n \n \n \n\n\n \n Watanabe, K.; Guo, W.; Arai, K.; Takanashi, H.; Kajiya-Kanegae, H.; Kobayashi, M.; Yano, K.; Tokunaga, T.; Fujiwara, T.; Tsutsumi, N.; and Iwata, H.\n\n\n \n\n\n\n Frontiers in Plant Science, 8. March 2017.\n \n\n\n\n
\n\n\n\n \n \n \"High-ThroughputPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{watanabe_high-throughput_2017,\n\ttitle = {High-{Throughput} {Phenotyping} of {Sorghum} {Plant} {Height} {Using} an {Unmanned} {Aerial} {Vehicle} and {Its} {Application} to {Genomic} {Prediction} {Modeling}},\n\tvolume = {8},\n\tissn = {1664-462X},\n\turl = {https://www.frontiersin.org/article/10.3389/fpls.2017.00421},\n\tdoi = {10.3389/fpls.2017.00421},\n\tabstract = {Genomics-assisted breeding methods have been rapidly developed with novel technologies such as next-generation sequencing, genomic selection and genome-wide association study. However, phenotyping is still time consuming and is a serious bottleneck in genomics-assisted breeding. In this study, we established a high-throughput phenotyping system for sorghum plant height and its response to nitrogen availability; this system relies on the use of unmanned aerial vehicle (UAV) remote sensing with either an RGB or near-infrared, green and blue (NIR-GB) camera. We evaluated the potential of remote sensing to provide phenotype training data in a genomic prediction model. UAV remote sensing with the NIR-GB camera and the 50th percentile of digital surface model, which is an indicator of height, performed well. The correlation coefficient between plant height measured by UAV remote sensing (PHUAV) and plant height measured with a ruler (PHR) was 0.523. Because PHUAV was overestimated (probably because of the presence of taller plants on adjacent plots), the correlation coefficient between PHUAV and PHR was increased to 0.678 by using one of the two replications (that with the lower PHUAV value). Genomic prediction modeling performed well under the low-fertilization condition, probably because PHUAV overestimation was smaller under this condition due to a lower plant height. The predicted values of PHUAV and PHR were highly correlated with each other (r = 0.842). This result suggests that the genomic prediction models generated with PHUAV were almost identical and that the performance of UAV remote sensing was similar to that of traditional measurements in genomic prediction modeling. UAV remote sensing has a high potential to increase the throughput of phenotyping and decrease its cost. UAV remote sensing will be an important and indispensable tool for high-throughput genomics-assisted plant breeding.},\n\turldate = {2022-04-12},\n\tjournal = {Frontiers in Plant Science},\n\tauthor = {Watanabe, Kakeru and Guo, Wei and Arai, Keigo and Takanashi, Hideki and Kajiya-Kanegae, Hiromi and Kobayashi, Masaaki and Yano, Kentaro and Tokunaga, Tsuyoshi and Fujiwara, Toru and Tsutsumi, Nobuhiro and Iwata, Hiroyoshi},\n\tmonth = mar,\n\tyear = {2017},\n}\n\n
\n
\n\n\n
\n Genomics-assisted breeding methods have been rapidly developed with novel technologies such as next-generation sequencing, genomic selection and genome-wide association study. However, phenotyping is still time consuming and is a serious bottleneck in genomics-assisted breeding. In this study, we established a high-throughput phenotyping system for sorghum plant height and its response to nitrogen availability; this system relies on the use of unmanned aerial vehicle (UAV) remote sensing with either an RGB or near-infrared, green and blue (NIR-GB) camera. We evaluated the potential of remote sensing to provide phenotype training data in a genomic prediction model. UAV remote sensing with the NIR-GB camera and the 50th percentile of digital surface model, which is an indicator of height, performed well. The correlation coefficient between plant height measured by UAV remote sensing (PHUAV) and plant height measured with a ruler (PHR) was 0.523. Because PHUAV was overestimated (probably because of the presence of taller plants on adjacent plots), the correlation coefficient between PHUAV and PHR was increased to 0.678 by using one of the two replications (that with the lower PHUAV value). Genomic prediction modeling performed well under the low-fertilization condition, probably because PHUAV overestimation was smaller under this condition due to a lower plant height. The predicted values of PHUAV and PHR were highly correlated with each other (r = 0.842). This result suggests that the genomic prediction models generated with PHUAV were almost identical and that the performance of UAV remote sensing was similar to that of traditional measurements in genomic prediction modeling. UAV remote sensing has a high potential to increase the throughput of phenotyping and decrease its cost. UAV remote sensing will be an important and indispensable tool for high-throughput genomics-assisted plant breeding.\n
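As a rough illustration of the height metric described above (the 50th percentile of a digital surface model over a plot, compared against ruler measurements), here is a small NumPy sketch; the raster values, ground elevation, and ruler readings are made-up placeholders, and a real DSM would be read from the photogrammetry output rather than generated randomly.

```python
# Sketch: plot-level plant height from a UAV digital surface model (DSM),
# using the 50th percentile of canopy height. All values are illustrative.
import numpy as np

rng = np.random.default_rng(0)
dsm_plot = 1.2 + 0.3 * rng.random((50, 50))   # surface elevation within one plot (m)
ground_level = 0.2                            # terrain elevation for the same plot (m)

canopy_height = dsm_plot - ground_level       # per-pixel plant height
ph_uav = np.percentile(canopy_height, 50)     # 50th percentile = plot-level height estimate

ph_ruler = np.array([1.10, 1.35, 0.95])       # hypothetical ruler measurements for 3 plots
ph_uav_all = np.array([1.22, 1.41, 1.05])     # corresponding UAV estimates
r = np.corrcoef(ph_ruler, ph_uav_all)[0, 1]   # correlation of the two measurement methods
print(f"plot height (UAV) = {ph_uav:.2f} m, r = {r:.2f}")
```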
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Comparison of ground cover estimates from experiment plots in cotton, sorghum and sugarcane based on images and ortho-mosaics captured by UAV.\n \n \n \n \n\n\n \n Duan, T.; Zheng, B.; Guo, W.; Ninomiya, S.; Guo, Y.; and Chapman, S. C.\n\n\n \n\n\n\n Functional Plant Biology, 44(1): 169–183. November 2016.\n \n\n\n\n
\n\n\n\n \n \n \"ComparisonPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{duan_comparison_2016,\n\ttitle = {Comparison of ground cover estimates from experiment plots in cotton, sorghum and sugarcane based on images and ortho-mosaics captured by {UAV}},\n\tvolume = {44},\n\tissn = {1445-4416},\n\turl = {https://www.publish.csiro.au/fp/FP16123},\n\tdoi = {10.1071/FP16123},\n\tabstract = {Ground cover is an important physiological trait affecting crop radiation capture, water-use efficiency and grain yield. It is challenging to efficiently measure ground cover with reasonable precision for large numbers of plots, especially in tall crop species. Here we combined two image-based methods to estimate plot-level ground cover for three species, from either an ortho-mosaic or undistorted (i.e. corrected for lens and camera effects) images captured by cameras using a low-altitude unmanned aerial vehicle (UAV). Reconstructed point clouds and ortho-mosaics for the whole field were created and a customised image processing workflow was developed to (1) segment the ‘whole-field’ datasets into individual plots, and (2) ‘reverse-calculate’ each plot from each undistorted image. Ground cover for individual plots was calculated by an efficient vegetation segmentation algorithm. For 79\\% of plots, estimated ground cover was greater from the ortho-mosaic than from images, particularly when plants were small, or when older/taller in large plots. While there was a good agreement between the ground cover estimates from ortho-mosaic and images when the target plot was positioned at a near-nadir view near the centre of image (cotton: R2 = 0.97, sorghum: R2 = 0.98, sugarcane: R2 = 0.84), ortho-mosaic estimates were 5\\% greater than estimates from these near-nadir images. Because each plot appeared in multiple images, there were multiple estimates of the ground cover, some of which should be excluded, e.g. when the plot is near edge within an image. Considering only the images with near-nadir view, the reverse calculation provides a more precise estimate of ground cover compared with the ortho-mosaic. The methodology is suitable for high throughput phenotyping for applications in agronomy, physiology and breeding for different crop species and can be extended to provide pixel-level data from other types of cameras including thermal and multi-spectral models.},\n\tlanguage = {en},\n\tnumber = {1},\n\turldate = {2022-04-12},\n\tjournal = {Functional Plant Biology},\n\tauthor = {Duan, Tao and Zheng, Bangyou and Guo, Wei and Ninomiya, Seishi and Guo, Yan and Chapman, Scott C.},\n\tmonth = nov,\n\tyear = {2016},\n\tpages = {169--183},\n}\n\n
\n
\n\n\n
\n Ground cover is an important physiological trait affecting crop radiation capture, water-use efficiency and grain yield. It is challenging to efficiently measure ground cover with reasonable precision for large numbers of plots, especially in tall crop species. Here we combined two image-based methods to estimate plot-level ground cover for three species, from either an ortho-mosaic or undistorted (i.e. corrected for lens and camera effects) images captured by cameras using a low-altitude unmanned aerial vehicle (UAV). Reconstructed point clouds and ortho-mosaics for the whole field were created and a customised image processing workflow was developed to (1) segment the ‘whole-field’ datasets into individual plots, and (2) ‘reverse-calculate’ each plot from each undistorted image. Ground cover for individual plots was calculated by an efficient vegetation segmentation algorithm. For 79% of plots, estimated ground cover was greater from the ortho-mosaic than from images, particularly when plants were small, or when older/taller in large plots. While there was a good agreement between the ground cover estimates from ortho-mosaic and images when the target plot was positioned at a near-nadir view near the centre of image (cotton: R2 = 0.97, sorghum: R2 = 0.98, sugarcane: R2 = 0.84), ortho-mosaic estimates were 5% greater than estimates from these near-nadir images. Because each plot appeared in multiple images, there were multiple estimates of the ground cover, some of which should be excluded, e.g. when the plot is near edge within an image. Considering only the images with near-nadir view, the reverse calculation provides a more precise estimate of ground cover compared with the ortho-mosaic. The methodology is suitable for high throughput phenotyping for applications in agronomy, physiology and breeding for different crop species and can be extended to provide pixel-level data from other types of cameras including thermal and multi-spectral models.\n
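Ground cover itself is simply the fraction of plot pixels classified as vegetation. The sketch below illustrates that calculation with a basic excess-green (ExG) threshold standing in for the paper's vegetation segmentation algorithm; the threshold value and the random test image are assumptions for illustration only.

```python
# Sketch: plot-level ground cover as the fraction of vegetation pixels in an
# undistorted plot image, using a simple ExG threshold as a stand-in segmenter.
import numpy as np

def ground_cover(rgb: np.ndarray, exg_threshold: float = 0.10) -> float:
    """rgb: H x W x 3 array with values in [0, 1]."""
    r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
    exg = 2.0 * g - r - b                 # excess-green index per pixel
    vegetation = exg > exg_threshold      # boolean vegetation mask
    return float(vegetation.mean())       # fraction of plot area covered by canopy

# Illustrative call on a random array; a real plot image would come from the
# ortho-mosaic segmentation or the per-image "reverse-calculated" crops.
plot_image = np.random.default_rng(1).random((480, 640, 3))
print(f"ground cover = {ground_cover(plot_image):.2%}")
```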
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automated characterization of flowering dynamics in rice using field-acquired time-series RGB images.\n \n \n \n \n\n\n \n Guo, W.; Fukatsu, T.; and Ninomiya, S.\n\n\n \n\n\n\n Plant Methods, 11(1): 7. February 2015.\n \n\n\n\n
\n\n\n\n \n \n \"AutomatedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{guo_automated_2015,\n\ttitle = {Automated characterization of flowering dynamics in rice using field-acquired time-series {RGB} images},\n\tvolume = {11},\n\tissn = {1746-4811},\n\turl = {https://doi.org/10.1186/s13007-015-0047-9},\n\tdoi = {10.1186/s13007-015-0047-9},\n\tabstract = {Flowering (spikelet anthesis) is one of the most important phenotypic characteristics of paddy rice, and researchers expend efforts to observe flowering timing. Observing flowering is very time-consuming and labor-intensive, because it is still visually performed by humans. An image-based method that automatically detects the flowering of paddy rice is highly desirable. However, varying illumination, diversity of appearance of the flowering parts of the panicles, shape deformation, partial occlusion, and complex background make the development of such a method challenging.},\n\tnumber = {1},\n\turldate = {2022-04-12},\n\tjournal = {Plant Methods},\n\tauthor = {Guo, Wei and Fukatsu, Tokihiro and Ninomiya, Seishi},\n\tmonth = feb,\n\tyear = {2015},\n\tkeywords = {BoVWs, SIFT, SVM, Time-series RGB image},\n\tpages = {7},\n}\n\n
\n
\n\n\n
\n Flowering (spikelet anthesis) is one of the most important phenotypic characteristics of paddy rice, and researchers expend efforts to observe flowering timing. Observing flowering is very time-consuming and labor-intensive, because it is still visually performed by humans. An image-based method that automatically detects the flowering of paddy rice is highly desirable. However, varying illumination, diversity of appearance of the flowering parts of the panicles, shape deformation, partial occlusion, and complex background make the development of such a method challenging.\n
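The keywords attached to this entry (SIFT, BoVWs, SVM) outline a classic bag-of-visual-words pipeline: local SIFT descriptors are quantized into visual words and an SVM classifies the resulting histograms. A hedged sketch of that generic pipeline follows, using OpenCV and scikit-learn; it assumes an OpenCV build that provides cv2.SIFT_create(), and the vocabulary size and SVM settings are illustrative choices, not the paper's configuration.

```python
# Sketch: generic SIFT + bag-of-visual-words + SVM classification, in the spirit
# of the flowering-patch detection described above. Not the authors' code.
import cv2
import numpy as np
from sklearn.cluster import MiniBatchKMeans
from sklearn.svm import SVC

def sift_descriptors(gray_patches):
    """Return a list of (n_i, 128) SIFT descriptor arrays, one per patch."""
    sift = cv2.SIFT_create()
    out = []
    for patch in gray_patches:
        _, desc = sift.detectAndCompute(patch, None)
        out.append(desc if desc is not None else np.empty((0, 128), np.float32))
    return out

def bovw_histograms(per_patch_desc, kmeans):
    """Quantize descriptors into visual words and build one histogram per patch."""
    k = kmeans.n_clusters
    hists = []
    for desc in per_patch_desc:
        if len(desc) == 0:
            hists.append(np.zeros(k))
        else:
            words = kmeans.predict(desc.astype(np.float32))
            hists.append(np.bincount(words, minlength=k) / len(words))
    return np.vstack(hists)

# Intended use (patches = grayscale crops labeled flowering / not flowering):
#   descs  = sift_descriptors(train_patches)
#   kmeans = MiniBatchKMeans(n_clusters=128, random_state=0).fit(np.vstack(descs))
#   clf    = SVC(kernel="rbf").fit(bovw_histograms(descs, kmeans), train_labels)
#   preds  = clf.predict(bovw_histograms(sift_descriptors(test_patches), kmeans))
```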
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Illumination invariant segmentation of vegetation for time series wheat images based on decision tree model.\n \n \n \n \n\n\n \n Guo, W.; Rage, U. K.; and Ninomiya, S.\n\n\n \n\n\n\n Computers and Electronics in Agriculture, 96: 58–66. August 2013.\n \n\n\n\n
\n\n\n\n \n \n \"IlluminationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{guo_illumination_2013,\n\ttitle = {Illumination invariant segmentation of vegetation for time series wheat images based on decision tree model},\n\tvolume = {96},\n\tissn = {0168-1699},\n\turl = {https://www.sciencedirect.com/science/article/pii/S0168169913000847},\n\tdoi = {10.1016/j.compag.2013.04.010},\n\tabstract = {Effective and efficient segmentation of vegetation from digital plant images is an actively studied topic in crop phenotyping. Many of the formerly proposed methods showed good performance in the extraction under controlled light conditions but it is still hard to properly extract only vegetation from RGB images taken under natural light condition where the images can contain shadowed and lighted parts with specularly reflected parts of plants. In this paper, we propose a robust method to extract vegetation from the plant images taken under natural light conditions using wheat images. The method is based on a machine learning process, decision tree and image noise reduction filters. We adopted the CART algorithm to create a decision tree in the training process and examined its performance using test images, comparing it with the performances of other methods such as ExG, ExG-ExR and Modified ExG which are widely used recently. The results showed that the accuracy of the vegetation extraction by the proposed method was significantly better than that of the other methods particularly for the images which include strongly shadowed and specularly reflected parts. The proposed method also has an advantage that the same model can be applied to different images without requiring a threshold adjustment for each image.},\n\tlanguage = {en},\n\turldate = {2022-04-12},\n\tjournal = {Computers and Electronics in Agriculture},\n\tauthor = {Guo, Wei and Rage, Uday K. and Ninomiya, Seishi},\n\tmonth = aug,\n\tyear = {2013},\n\tkeywords = {Machine learning, Natural light condition, Non-thresholding, Specular reflection, Vegetation segmentation},\n\tpages = {58--66},\n}\n\n
\n
\n\n\n
\n Effective and efficient segmentation of vegetation from digital plant images is an actively studied topic in crop phenotyping. Many of the formerly proposed methods showed good performance in the extraction under controlled light conditions but it is still hard to properly extract only vegetation from RGB images taken under natural light condition where the images can contain shadowed and lighted parts with specularly reflected parts of plants. In this paper, we propose a robust method to extract vegetation from the plant images taken under natural light conditions using wheat images. The method is based on a machine learning process, decision tree and image noise reduction filters. We adopted the CART algorithm to create a decision tree in the training process and examined its performance using test images, comparing it with the performances of other methods such as ExG, ExG-ExR and Modified ExG which are widely used recently. The results showed that the accuracy of the vegetation extraction by the proposed method was significantly better than that of the other methods particularly for the images which include strongly shadowed and specularly reflected parts. The proposed method also has an advantage that the same model can be applied to different images without requiring a threshold adjustment for each image.\n
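As a rough sketch of the idea of a trained, threshold-free per-pixel classifier, the snippet below fits scikit-learn's DecisionTreeClassifier (a CART implementation) on RGB pixel samples and applies it to whole images. The synthetic training pixels and tree depth are assumptions for illustration; the paper trained on hand-labeled pixels from real wheat images and combined the tree with noise-reduction filters.

```python
# Sketch: CART-style per-pixel vegetation classification with scikit-learn.
# Training pixels are synthetic placeholders for hand-labeled image samples.
import numpy as np
from sklearn.tree import DecisionTreeClassifier

rng = np.random.default_rng(42)
veg_pixels = rng.normal([60, 140, 50], 25, (500, 3))    # greenish training pixels
bg_pixels = rng.normal([120, 110, 100], 30, (500, 3))   # soil/residue training pixels
X = np.clip(np.vstack([veg_pixels, bg_pixels]), 0, 255)
y = np.r_[np.ones(500), np.zeros(500)]                  # 1 = vegetation, 0 = background

tree = DecisionTreeClassifier(max_depth=6, random_state=0).fit(X, y)  # CART in sklearn

def segment(rgb_image: np.ndarray) -> np.ndarray:
    """Apply the trained tree to every pixel; no per-image threshold needed."""
    h, w, _ = rgb_image.shape
    return tree.predict(rgb_image.reshape(-1, 3)).reshape(h, w).astype(bool)

mask = segment(rng.integers(0, 256, (100, 100, 3)).astype(float))
print(f"vegetation fraction: {mask.mean():.2%}")
```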
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n inproceedings\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n 3D reconstruction of a group of plants by the ground multi-image photogrammetry method.\n \n \n \n \n\n\n \n Drofova, I.; Wang, H.; Guo, W.; Pospisilik, M.; Adamek, M.; and Valouch, J.\n\n\n \n\n\n\n In 2023 33rd International Conference Radioelektronika (RADIOELEKTRONIKA), pages 1–4, Pardubice, Czech Republic, April 2023. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"3DPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{drofova_3d_2023,\n\taddress = {Pardubice, Czech Republic},\n\ttitle = {{3D} reconstruction of a group of plants by the ground multi-image photogrammetry method},\n\tisbn = {9798350398342},\n\turl = {https://ieeexplore.ieee.org/document/10109045/},\n\tdoi = {10.1109/RADIOELEKTRONIKA57919.2023.10109045},\n\turldate = {2023-05-16},\n\tbooktitle = {2023 33rd {International} {Conference} {Radioelektronika} ({RADIOELEKTRONIKA})},\n\tpublisher = {IEEE},\n\tauthor = {Drofova, Irena and Wang, Haozhou and Guo, Wei and Pospisilik, Martin and Adamek, Milan and Valouch, Jan},\n\tmonth = apr,\n\tyear = {2023},\n\tpages = {1--4},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Extending the WILDS Benchmark for Unsupervised Adaptation.\n \n \n \n \n\n\n \n Sagawa, S.; Koh, P. W.; Lee, T.; Gao, I.; Xie, S. M.; Shen, K.; Kumar, A.; Hu, W.; Yasunaga, M.; Marklund, H.; Beery, S.; David, E.; Stavness, I.; Guo, W.; Leskovec, J.; Saenko, K.; Hashimoto, T.; Levine, S.; Finn, C.; and Liang, P.\n\n\n \n\n\n\n In International Conference on Learning Representations, April 2022. \n \n\n\n\n
\n\n\n\n \n \n \"ExtendingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{sagawa_extending_2022,\n\ttitle = {Extending the {WILDS} {Benchmark} for {Unsupervised} {Adaptation}},\n\turl = {https://openreview.net/forum?id=z7p2V6KROOV},\n\tabstract = {Machine learning systems deployed in the wild are often trained on a source distribution but deployed on a different target distribution. Unlabeled data can be a powerful point of leverage for mitigating these distribution shifts, as it is frequently much more available than labeled data and can often be obtained from distributions beyond the source distribution as well. However, existing distribution shift benchmarks with unlabeled data do not reflect the breadth of scenarios that arise in real-world applications. In this work, we present the WILDS 2.0 update, which extends 8 of the 10 datasets in the WILDS benchmark of distribution shifts to include curated unlabeled data that would be realistically obtainable in deployment. These datasets span a wide range of applications (from histology to wildlife conservation), tasks (classification, regression, and detection), and modalities (photos, satellite images, microscope slides, text, molecular graphs). The update maintains consistency with the original WILDS benchmark by using identical labeled training, validation, and test sets, as well as identical evaluation metrics. We systematically benchmark state-of-the-art methods that use unlabeled data, including domain-invariant, self-training, and self-supervised methods, and show that their success on WILDS is limited. To facilitate method development, we provide an open-source package that automates data loading and contains the model architectures and methods used in this paper. Code and leaderboards are available at https://wilds.stanford.edu.},\n\tbooktitle = {International {Conference} on {Learning} {Representations}},\n\tauthor = {Sagawa, Shiori and Koh, Pang Wei and Lee, Tony and Gao, Irena and Xie, Sang Michael and Shen, Kendrick and Kumar, Ananya and Hu, Weihua and Yasunaga, Michihiro and Marklund, Henrik and Beery, Sara and David, Etienne and Stavness, Ian and Guo, Wei and Leskovec, Jure and Saenko, Kate and Hashimoto, Tatsunori and Levine, Sergey and Finn, Chelsea and Liang, Percy},\n\tmonth = apr,\n\tyear = {2022},\n}\n\n
\n
\n\n\n
\n Machine learning systems deployed in the wild are often trained on a source distribution but deployed on a different target distribution. Unlabeled data can be a powerful point of leverage for mitigating these distribution shifts, as it is frequently much more available than labeled data and can often be obtained from distributions beyond the source distribution as well. However, existing distribution shift benchmarks with unlabeled data do not reflect the breadth of scenarios that arise in real-world applications. In this work, we present the WILDS 2.0 update, which extends 8 of the 10 datasets in the WILDS benchmark of distribution shifts to include curated unlabeled data that would be realistically obtainable in deployment. These datasets span a wide range of applications (from histology to wildlife conservation), tasks (classification, regression, and detection), and modalities (photos, satellite images, microscope slides, text, molecular graphs). The update maintains consistency with the original WILDS benchmark by using identical labeled training, validation, and test sets, as well as identical evaluation metrics. We systematically benchmark state-of-the-art methods that use unlabeled data, including domain-invariant, self-training, and self-supervised methods, and show that their success on WILDS is limited. To facilitate method development, we provide an open-source package that automates data loading and contains the model architectures and methods used in this paper. Code and leaderboards are available at https://wilds.stanford.edu.\n
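The open-source package mentioned at the end of the abstract is distributed as the wilds Python package; the sketch below shows the basic data-loading calls as I understand them from the project's documentation (https://wilds.stanford.edu). The dataset name, transform, and batch size are arbitrary examples, and the unlabeled-data call and subset names added in WILDS 2.0 should be verified against the current docs.

```python
# Sketch: loading a WILDS dataset with the wilds package described above.
# Dataset choice, transform, and batch size are illustrative only.
from wilds import get_dataset
from wilds.common.data_loaders import get_train_loader
import torchvision.transforms as transforms

dataset = get_dataset(dataset="iwildcam", download=True)            # labeled benchmark data
train_data = dataset.get_subset("train", transform=transforms.ToTensor())
train_loader = get_train_loader("standard", train_data, batch_size=16)

# WILDS 2.0 additionally exposes curated unlabeled data for most datasets, e.g.:
# unlabeled = get_dataset(dataset="iwildcam", unlabeled=True, download=True)

for x, y, metadata in train_loader:   # loaders yield (input, label, metadata)
    break                             # plug in any model / training loop here
```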
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n WILDS: A Benchmark of in-the-Wild Distribution Shifts.\n \n \n \n \n\n\n \n Koh, P. W.; Sagawa, S.; Marklund, H.; Xie, S. M.; Zhang, M.; Balsubramani, A.; Hu, W.; Yasunaga, M.; Phillips, R. L.; Gao, I.; Lee, T.; David, E.; Stavness, I.; Guo, W.; Earnshaw, B.; Haque, I.; Beery, S. M; Leskovec, J.; Kundaje, A.; Pierson, E.; Levine, S.; Finn, C.; and Liang, P.\n\n\n \n\n\n\n In Meila, M.; and Zhang, T., editor(s), Proceedings of the 38th International Conference on Machine Learning, volume 139, of Proceedings of Machine Learning Research, pages 5637–5664, July 2021. PMLR\n \n\n\n\n
\n\n\n\n \n \n \"WILDS:Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{koh_wilds_2021,\n\tseries = {Proceedings of {Machine} {Learning} {Research}},\n\ttitle = {{WILDS}: {A} {Benchmark} of in-the-{Wild} {Distribution} {Shifts}},\n\tvolume = {139},\n\turl = {https://proceedings.mlr.press/v139/koh21a.html},\n\tabstract = {Distribution shifts—where the training distribution differs from the test distribution—can substantially degrade the accuracy of machine learning (ML) systems deployed in the wild. Despite their ubiquity in the real-world deployments, these distribution shifts are under-represented in the datasets widely used in the ML community today. To address this gap, we present WILDS, a curated benchmark of 10 datasets reflecting a diverse range of distribution shifts that naturally arise in real-world applications, such as shifts across hospitals for tumor identification; across camera traps for wildlife monitoring; and across time and location in satellite imaging and poverty mapping. On each dataset, we show that standard training yields substantially lower out-of-distribution than in-distribution performance. This gap remains even with models trained by existing methods for tackling distribution shifts, underscoring the need for new methods for training models that are more robust to the types of distribution shifts that arise in practice. To facilitate method development, we provide an open-source package that automates dataset loading, contains default model architectures and hyperparameters, and standardizes evaluations. The full paper, code, and leaderboards are available at https://wilds.stanford.edu.},\n\tbooktitle = {Proceedings of the 38th {International} {Conference} on {Machine} {Learning}},\n\tpublisher = {PMLR},\n\tauthor = {Koh, Pang Wei and Sagawa, Shiori and Marklund, Henrik and Xie, Sang Michael and Zhang, Marvin and Balsubramani, Akshay and Hu, Weihua and Yasunaga, Michihiro and Phillips, Richard Lanas and Gao, Irena and Lee, Tony and David, Etienne and Stavness, Ian and Guo, Wei and Earnshaw, Berton and Haque, Imran and Beery, Sara M and Leskovec, Jure and Kundaje, Anshul and Pierson, Emma and Levine, Sergey and Finn, Chelsea and Liang, Percy},\n\teditor = {Meila, Marina and Zhang, Tong},\n\tmonth = jul,\n\tyear = {2021},\n\tpages = {5637--5664},\n}\n\n
\n
\n\n\n
\n Distribution shifts—where the training distribution differs from the test distribution—can substantially degrade the accuracy of machine learning (ML) systems deployed in the wild. Despite their ubiquity in the real-world deployments, these distribution shifts are under-represented in the datasets widely used in the ML community today. To address this gap, we present WILDS, a curated benchmark of 10 datasets reflecting a diverse range of distribution shifts that naturally arise in real-world applications, such as shifts across hospitals for tumor identification; across camera traps for wildlife monitoring; and across time and location in satellite imaging and poverty mapping. On each dataset, we show that standard training yields substantially lower out-of-distribution than in-distribution performance. This gap remains even with models trained by existing methods for tackling distribution shifts, underscoring the need for new methods for training models that are more robust to the types of distribution shifts that arise in practice. To facilitate method development, we provide an open-source package that automates dataset loading, contains default model architectures and hyperparameters, and standardizes evaluations. The full paper, code, and leaderboards are available at https://wilds.stanford.edu.\n
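The headline observation (a gap between in-distribution and out-of-distribution performance) is straightforward to quantify once per-split predictions exist. The toy sketch below just compares accuracies across two splits with placeholder labels; in practice the WILDS package ships an official per-dataset evaluation routine with the standardized metrics the abstract mentions.

```python
# Sketch: the in-distribution vs. out-of-distribution gap described above,
# computed from placeholder per-split predictions.
import numpy as np

def accuracy(y_true, y_pred):
    return float(np.mean(np.asarray(y_true) == np.asarray(y_pred)))

id_val_acc = accuracy([1, 0, 1, 1, 0, 1], [1, 0, 1, 1, 0, 0])    # same distribution as training
ood_test_acc = accuracy([1, 0, 1, 1, 0, 1], [0, 0, 1, 0, 0, 0])  # shifted deployment distribution

print(f"ID accuracy {id_val_acc:.2f}, OOD accuracy {ood_test_acc:.2f}, "
      f"gap {id_val_acc - ood_test_acc:.2f}")
```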
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Domain Adaptation for Plant Organ Detection with Style Transfer.\n \n \n \n \n\n\n \n James, C.; Gu, Y.; Chapman, S.; Guo, W.; David, E.; Madec, S.; Potgieter, A.; and Eriksson, A.\n\n\n \n\n\n\n In 2021 Digital Image Computing: Techniques and Applications (DICTA), pages 1–9, Gold Coast, Australia, November 2021. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"DomainPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{james_domain_2021,\n\taddress = {Gold Coast, Australia},\n\ttitle = {Domain {Adaptation} for {Plant} {Organ} {Detection} with {Style} {Transfer}},\n\tisbn = {978-1-66541-709-9},\n\turl = {https://ieeexplore.ieee.org/document/9647293/},\n\tdoi = {10.1109/DICTA52665.2021.9647293},\n\tabstract = {Deep learning based detection of sorghum panicles has been proposed to replace manual counting in field trials. However, model performance is highly sensitive to domain shift between training datasets associated with differences in genotypes, field conditions, and various lighting conditions. As labelling such datasets is expensive and laborious, we propose a pipeline of Contrastive Unpaired Translation (CUT) based domain adaptation method to improve detection performance in new datasets, including for completely different crop species. Firstly, original dataset is translated to other styles using CUT trained on unlabelled datasets from other domains. Then labels are corrected after synthesis of the new domain dataset. Finally, detectors are retrained on the synthesized dataset. Experiments show that, in case of sorghum panicles, the accuracy of the models when trained with synthetic images improve by fifteen to twenty percent. Furthermore, the models are more robust towards change in prediction thresholds. Hence, demonstrating the effectiveness of the pipeline.},\n\turldate = {2022-04-12},\n\tbooktitle = {2021 {Digital} {Image} {Computing}: {Techniques} and {Applications} ({DICTA})},\n\tpublisher = {IEEE},\n\tauthor = {James, Chrisbin and Gu, Yanyang and Chapman, Scott and Guo, Wei and David, Etienne and Madec, Simon and Potgieter, Andries and Eriksson, Anders},\n\tmonth = nov,\n\tyear = {2021},\n\tpages = {1--9},\n}\n\n
\n
\n\n\n
\n Deep learning based detection of sorghum panicles has been proposed to replace manual counting in field trials. However, model performance is highly sensitive to domain shift between training datasets associated with differences in genotypes, field conditions, and various lighting conditions. As labelling such datasets is expensive and laborious, we propose a pipeline of Contrastive Unpaired Translation (CUT) based domain adaptation method to improve detection performance in new datasets, including for completely different crop species. Firstly, original dataset is translated to other styles using CUT trained on unlabelled datasets from other domains. Then labels are corrected after synthesis of the new domain dataset. Finally, detectors are retrained on the synthesized dataset. Experiments show that, in case of sorghum panicles, the accuracy of the models when trained with synthetic images improve by fifteen to twenty percent. Furthermore, the models are more robust towards change in prediction thresholds. Hence, demonstrating the effectiveness of the pipeline.\n
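Below is a control-flow-only sketch of the three-step pipeline summarized above (translate labeled source images into the target style, correct the labels on the synthesized images, retrain the detector). The helper functions and the Detector class are hypothetical stubs: CUT itself is normally run from its authors' contrastive-unpaired-translation repository, label correction is a manual step, and the detector would be whatever object-detection framework the panicle model uses.

```python
# Hypothetical skeleton of the CUT-based domain-adaptation pipeline.
# Only the control flow is real; every component below is a stub.
def translate_with_cut(image, target_style_images):
    """Stand-in for CUT inference (source image -> target-domain appearance)."""
    return image  # a real implementation would return the style-transferred image

def review_labels(images, labels):
    """Stand-in for manually verifying/correcting boxes after synthesis."""
    return labels

class Detector:
    """Stand-in detector wrapper with a scikit-learn-style fit interface."""
    def fit(self, images, labels):
        return self

def adapt_and_retrain(source_images, source_labels, target_style_images):
    synthetic = [translate_with_cut(img, target_style_images) for img in source_images]
    corrected = review_labels(synthetic, source_labels)   # step 2: fix labels
    return Detector().fit(synthetic, corrected)           # step 3: retrain on new domain

detector = adapt_and_retrain(["img_a", "img_b"], ["boxes_a", "boxes_b"], ["target_img"])
```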
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n An adaptive supervision framework for active learning in object detection.\n \n \n \n\n\n \n Desai, S.; Lagandula, A.; Guo, W.; Ninomiya, S.; and Balasubramanian, V.\n\n\n \n\n\n\n In 30th British Machine Vision Conference (BMVC 2019), Cardiff, September 2019. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{desai_adaptive_2019,\n\taddress = {Cardiff},\n\ttitle = {An adaptive supervision framework for active learning in object detection},\n\tabstract = {Active learning approaches in computer vision generally involve querying strong labels for data. However, previous works have shown that weak supervision can be effective in training models for vision tasks while greatly reducing annotation costs. Using this knowledge, we propose an adaptive supervision framework for active learning and demonstrate its effectiveness on the task of object detection. Instead of directly querying bounding box annotations (strong labels) for the most informative samples, we first query weak labels and optimize the model. Using a switching condition, the required supervision level can be increased. Our framework requires little to no change in model architecture. Our extensive experiments show that the proposed framework can be used to train good generalizable models with much lesser annotation costs than the state of the art active learning approaches for object detection. © 2019. The copyright of this document resides with its authors.},\n\tlanguage = {English},\n\tbooktitle = {30th {British} {Machine} {Vision} {Conference}({BMVC} 2019)},\n\tauthor = {Desai, S.V. and Lagandula, A.C. and Guo, W. and Ninomiya, S. and Balasubramanian, V.N.},\n\tmonth = sep,\n\tyear = {2019},\n}\n\n
\n
\n\n\n
\n Active learning approaches in computer vision generally involve querying strong labels for data. However, previous works have shown that weak supervision can be effective in training models for vision tasks while greatly reducing annotation costs. Using this knowledge, we propose an adaptive supervision framework for active learning and demonstrate its effectiveness on the task of object detection. Instead of directly querying bounding box annotations (strong labels) for the most informative samples, we first query weak labels and optimize the model. Using a switching condition, the required supervision level can be increased. Our framework requires little to no change in model architecture. Our extensive experiments show that the proposed framework can be used to train good generalizable models with much lesser annotation costs than the state of the art active learning approaches for object detection. © 2019. The copyright of this document resides with its authors.\n
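The loop below is a hypothetical skeleton of the adaptive-supervision idea described in the abstract: query cheap weak labels first, retrain, and escalate to strong (bounding-box) labels once a switching condition fires. Every helper (acquisition, labeling, retraining, validation) is a stub, and the plateau-based switching rule is an illustrative placeholder rather than the paper's actual condition.

```python
# Hypothetical skeleton of weak-first active learning with a supervision switch.
# All helpers are stubs; only the control flow mirrors the idea described above.
import random

def select_most_informative(model, pool, k=10):   # stub acquisition function
    return random.sample(pool, k)

def weak_label(sample):                           # stub cheap annotation (e.g. a click)
    return ("weak", sample)

def strong_label(sample):                         # stub full bounding-box annotation
    return ("strong", sample)

def retrain(model, batch, labels):                # stub model update
    return model

def validate(model):                              # stub validation score
    return random.random()

def active_learning(model, pool, rounds=5, plateau_eps=0.005):
    use_strong, prev_score = False, 0.0
    for _ in range(rounds):
        batch = select_most_informative(model, pool)
        labels = [strong_label(s) if use_strong else weak_label(s) for s in batch]
        model = retrain(model, batch, labels)
        score = validate(model)
        if not use_strong and score - prev_score < plateau_eps:  # switching condition (placeholder)
            use_strong = True                                    # escalate supervision level
        prev_score = score
    return model

active_learning(model=None, pool=list(range(100)))
```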
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n misc\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n 農業におけるビッグデータ時代の到来と課題 ~東京大学・二宮正士特任教授(前編).\n \n \n \n \n\n\n \n 二宮, 正.\n\n\n \n\n\n\n May 2019.\n \n\n\n\n
\n\n\n\n \n \n \"農業におけるビッグデータ時代の到来と課題Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{__2019-1,\n\ttitle = {農業におけるビッグデータ時代の到来と課題 ~東京大学・二宮正士特任教授(前編)},\n\turl = {https://smartagri-jp.com/smartagri/256},\n\tlanguage = {Japanese},\n\tauthor = {二宮, 正士},\n\tmonth = may,\n\tyear = {2019},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n 日本の近代農業150年の蓄積データを活かすには ~東京大学・二宮正士特任教授(後編).\n \n \n \n \n\n\n \n 二宮, 正.\n\n\n \n\n\n\n May 2019.\n \n\n\n\n
\n\n\n\n \n \n \"日本の近代農業150年の蓄積データを活かすにはPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{_150_2019,\n\ttitle = {日本の近代農業150年の蓄積データを活かすには ~東京大学・二宮正士特任教授(後編)},\n\turl = {https://smartagri-jp.com/smartagri/257},\n\tlanguage = {Japanese},\n\tauthor = {二宮, 正士},\n\tmonth = may,\n\tyear = {2019},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n 情報科学との協働で栽培技術を革新.\n \n \n \n \n\n\n \n 二宮, 正.\n\n\n \n\n\n\n June 2019.\n \n\n\n\n
\n\n\n\n \n \n \"情報科学との協働で栽培技術を革新Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{__2019,\n\ttitle = {情報科学との協働で栽培技術を革新},\n\turl = {https://www.jst.go.jp/pr/jst-news/backnumber/2019/201906/pdf/2019_06_p03-04.pdf},\n\tauthor = {二宮, 正士},\n\tmonth = jun,\n\tyear = {2019},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n 東京オリンピックで食材が足りない?!――21世紀の持続的農業生産.\n \n \n \n \n\n\n \n 二宮, 正.\n\n\n \n\n\n\n August 2016.\n \n\n\n\n
\n\n\n\n \n \n \"東京オリンピックで食材が足りない?!――21世紀の持続的農業生産Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{_21_2016,\n\ttitle = {東京オリンピックで食材が足りない?!――21世紀の持続的農業生産},\n\turl = {https://synodos.jp/opinion/science/17742/},\n\tauthor = {二宮, 正士},\n\tmonth = aug,\n\tyear = {2016},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n patent\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Crown identification device, identification method, program, and recording medium.\n \n \n \n\n\n \n Sekiya, K.; Ochiai, H.; Nagoshi, Y.; Guo, W.; Mu, Y.; Hirafuji, M.; Ninomiya, S.; and Takata, D.\n\n\n \n\n\n\n February 2023.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@patent{sekiya_crown_2023,\n\ttitle = {Crown identification device, identification method, program, and recording medium},\n\tnumber = {11594020},\n\tauthor = {Sekiya, Kazuki and Ochiai, Hiroaki and Nagoshi, Yurika and Guo, Wei and Mu, Yue and Hirafuji, Masayuki and Ninomiya, Seishi and Takata, Daisuke},\n\tmonth = feb,\n\tyear = {2023},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Device for collecting breeding data in farm field, device for analyzing feature in breeding, method for collecting breeding data in farm field, program, and recording medium.\n \n \n \n\n\n \n Sekiya, K.; Taniguchi, S.; Kamiya, T.; Ochiai, H.; Nagoshi, Y.; Guo, W.; Mu, Y.; Hirafuji, M.; Ninomiya, S.; Takata, D.; and others\n\n\n \n\n\n\n October 2022.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@patent{sekiya_device_2022,\n\ttitle = {Device for collecting breeding data in farm field, device for analyzing feature in breeding, method for collecting breeding data in farm field, program, and recording medium},\n\tnationality = {US},\n\tnumber = {11462008},\n\tauthor = {Sekiya, Kazuki and Taniguchi, Shigeharu and Kamiya, Toshiyuki and Ochiai, Hiroaki and Nagoshi, Yurika and Guo, Wei and Mu, Yue and Hirafuji, Masayuki and Ninomiya, Seishi and Takata, Daisuke and {others}},\n\tmonth = oct,\n\tyear = {2022},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n phdthesis\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Studies on high-throughput phenotyping by use of time series crop images taken under natural environments.\n \n \n \n \n\n\n \n Guo, W.\n\n\n \n\n\n\n Ph.D. Thesis, University of Tokyo, March 2014.\n \n\n\n\n
\n\n\n\n \n \n \"StudiesPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 8 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@phdthesis{guo_studies_2014,\n\ttitle = {Studies on high-throughput phenotyping by use of time series crop images taken under natural environments},\n\turl = {https://repository.dl.itc.u-tokyo.ac.jp/records/6924#.YlUE9shBzUQ},\n\tlanguage = {EN},\n\tschool = {University of Tokyo},\n\tauthor = {Guo, Wei},\n\tmonth = mar,\n\tyear = {2014},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n 植物生長のモデリングに関する基礎的研究.\n \n \n \n \n\n\n \n 平藤, 雅.\n\n\n \n\n\n\n Ph.D. Thesis, 東京大学, 1995.\n \n\n\n\n
\n\n\n\n \n \n \"植物生長のモデリングに関する基礎的研究Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@phdthesis{___1995,\n\ttitle = {植物生長のモデリングに関する基礎的研究},\n\turl = {https://repository.dl.itc.u-tokyo.ac.jp/records/3157},\n\tschool = {東京大学},\n\tauthor = {平藤, 雅之},\n\tyear = {1995},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n ダイズにおけるサーカデアンリズムとその種内変異に関する研究-特に葉の就眠運動に着目して.\n \n \n \n \n\n\n \n 二宮, 正.\n\n\n \n\n\n\n Ph.D. Thesis, 東京大学, 1982.\n \n\n\n\n
\n\n\n\n \n \n \"ダイズにおけるサーカデアンリズムとその種内変異に関する研究-特に葉の就眠運動に着目してPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@phdthesis{_-_1982,\n\ttitle = {ダイズにおけるサーカデアンリズムとその種内変異に関する研究-特に葉の就眠運動に着目して},\n\turl = {http://gakui.dl.itc.u-tokyo.ac.jp/cgi-bin/gazo.cgi?no=105856},\n\tlanguage = {JPN},\n\tschool = {東京大学},\n\tauthor = {二宮,正士},\n\tyear = {1982},\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n\n\n\n
\n\n\n \n\n \n \n \n \n\n
\n"}; document.write(bibbase_data.data);